From ba324a542449d5092621dda960d2959e9cf9458a Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 2 Sep 2022 19:31:53 +0800 Subject: [PATCH 01/25] YARN-11290. Improve Query Condition of FederationStateStore#getApplicationsHomeSubCluster. --- .../MySQL/FederationStateStoreStoredProcs.sql | 10 ++- .../hadoop/yarn/conf/YarnConfiguration.java | 5 ++ .../impl/MemoryFederationStateStore.java | 37 ++++++-- .../store/impl/SQLFederationStateStore.java | 21 ++++- .../impl/ZookeeperFederationStateStore.java | 29 ++++++- .../GetApplicationsHomeSubClusterRequest.java | 30 +++++++ ...plicationsHomeSubClusterRequestPBImpl.java | 36 ++++++++ .../proto/yarn_server_federation_protos.proto | 2 +- .../impl/FederationStateStoreBaseTest.java | 85 +++++++++++++++++++ .../impl/HSQLDBFederationStateStore.java | 9 +- .../impl/TestMemoryFederationStateStore.java | 2 + .../impl/TestSQLFederationStateStore.java | 1 + .../TestZookeeperFederationStateStore.java | 1 + 13 files changed, 250 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index eae882e4a48dd..ff755c5f56614 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -122,10 +122,14 @@ BEGIN WHERE applicationId = applicationID_IN; END // -CREATE PROCEDURE sp_getApplicationsHomeSubCluster() +CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int) BEGIN - SELECT applicationId, homeSubCluster - FROM applicationsHomeSubCluster; + SELECT + applicationId, homeSubCluster + FROM + (SELECT + applicationId, homeSubCluster, ROW_NUMBER() OVER (ORDER BY applicationId) AS row_num + FROM applicationsHomeSubCluster) WHERE row_num <= limit_IN END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index cc2c10cd2f421..fcf0bc09a93fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -4004,6 +4004,11 @@ public static boolean isAclEnabled(Configuration conf) { public static final int DEFAULT_FEDERATION_STATESTORE_SQL_MAXCONNECTIONS = 1; + public static final String FEDERATION_STATESTORE_MAX_APPLICATIONS = + FEDERATION_PREFIX + "state-store.max-applications"; + + public static final int DEFAULT_FEDERATION_STATESTORE_MAX_APPLICATIONS = 1000; + public static final String ROUTER_PREFIX = YARN_PREFIX + "router."; public static final String ROUTER_BIND_HOST = ROUTER_PREFIX + "bind-host"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index 920b8e8912d1e..a1c17ce6545c8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -24,10 +24,12 @@ import java.util.Map.Entry; import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; @@ -90,6 +92,7 @@ public class MemoryFederationStateStore implements FederationStateStore { private Map applications; private Map reservations; private Map policies; + private int maxAppsInStateStore; private final MonotonicClock clock = new MonotonicClock(); @@ -102,6 +105,9 @@ public void init(Configuration conf) { applications = new ConcurrentHashMap(); reservations = new ConcurrentHashMap(); policies = new ConcurrentHashMap(); + maxAppsInStateStore = conf.getInt( + YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_MAX_APPLICATIONS); } @Override @@ -255,14 +261,33 @@ public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( @Override public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( GetApplicationsHomeSubClusterRequest request) throws YarnException { - List result = - new ArrayList(); - for (Entry e : applications.entrySet()) { - result - .add(ApplicationHomeSubCluster.newInstance(e.getKey(), e.getValue())); + + if (request == null) { + throw new YarnException("Missing getApplicationsHomeSubCluster request"); + } + + List result = new ArrayList<>(); + List applicationIdList = + applications.keySet().stream().collect(Collectors.toList()); + + SubClusterId requestSubClusterId = request.getSubClusterId(); + int appCount = 0; + for (int i = 0; i < applicationIdList.size(); i++) { + if (appCount >= maxAppsInStateStore) { + break; + } + ApplicationId applicationId = applicationIdList.get(i); + SubClusterId subClusterId = applications.get(applicationId); + // If the requestSubClusterId that needs to be filtered in the request + // is inconsistent with the SubClusterId in the data, continue to the next round + if (requestSubClusterId != null && !requestSubClusterId.equals(subClusterId)){ + continue; + } + result.add(ApplicationHomeSubCluster.newInstance(applicationId, subClusterId)); + appCount++; } - GetApplicationsHomeSubClusterResponse.newInstance(result); + LOG.info("requestSubClusterId = {}, appCount = {}.", requestSubClusterId, appCount); return GetApplicationsHomeSubClusterResponse.newInstance(result); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index dffcfa6a10ee9..dc2d731e89b8e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -129,7 +129,7 @@ public class SQLFederationStateStore implements FederationStateStore { "{call sp_getApplicationHomeSubCluster(?, ?)}"; private static final String CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER = - "{call sp_getApplicationsHomeSubCluster()}"; + "{call sp_getApplicationsHomeSubCluster(?, ?)}"; private static final String CALL_SP_SET_POLICY_CONFIGURATION = "{call sp_setPolicyConfiguration(?, ?, ?, ?)}"; @@ -154,6 +154,7 @@ public class SQLFederationStateStore implements FederationStateStore { private final Clock clock = new MonotonicClock(); @VisibleForTesting Connection conn = null; + private int maxAppsInStateStore; @Override public void init(Configuration conf) throws YarnException { @@ -193,6 +194,10 @@ public void init(Configuration conf) throws YarnException { FederationStateStoreUtils.logAndThrowRetriableException(LOG, "Not able to get Connection", e); } + + maxAppsInStateStore = conf.getInt( + YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_MAX_APPLICATIONS); } @Override @@ -726,13 +731,23 @@ public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( @Override public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( GetApplicationsHomeSubClusterRequest request) throws YarnException { + + if (request == null) { + throw new YarnException("Missing getApplicationsHomeSubCluster request"); + } + CallableStatement cstmt = null; ResultSet rs = null; - List appsHomeSubClusters = - new ArrayList(); + List appsHomeSubClusters = new ArrayList<>(); try { cstmt = getCallableStatement(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); + cstmt.setInt("limit_IN", maxAppsInStateStore); + String homeSubClusterIN = null;; + if (request.getSubClusterId() != null) { + homeSubClusterIN = request.getSubClusterId().toString(); + } + cstmt.setString("homeSubCluster_IN", homeSubClusterIN); // Execute the query long startTime = clock.getTime(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index d710dacd54087..73d0ba71f6553 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -133,6 +133,7 @@ public class ZookeeperFederationStateStore implements FederationStateStore { private String membershipZNode; private String policiesZNode; private String reservationsZNode; + private int maxAppsInStateStore; private volatile Clock clock = SystemClock.getInstance(); @@ -144,6 +145,10 @@ public class ZookeeperFederationStateStore implements FederationStateStore { public void init(Configuration conf) throws YarnException { 
LOG.info("Initializing ZooKeeper connection"); + maxAppsInStateStore = conf.getInt( + YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_MAX_APPLICATIONS); + baseZNode = conf.get( YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH, YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH); @@ -255,16 +260,32 @@ public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( @Override public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( GetApplicationsHomeSubClusterRequest request) throws YarnException { + + if (request == null) { + throw new YarnException("Missing getApplicationsHomeSubCluster request"); + } + long start = clock.getTime(); List result = new ArrayList<>(); + SubClusterId requestSubClusterId = request.getSubClusterId(); + int appCount = 0; try { - for (String child : zkManager.getChildren(appsZNode)) { + List childrens = zkManager.getChildren(appsZNode); + for (String child : childrens) { + if (appCount >= maxAppsInStateStore) { + break; + } ApplicationId appId = ApplicationId.fromString(child); SubClusterId homeSubCluster = getApp(appId); - ApplicationHomeSubCluster app = - ApplicationHomeSubCluster.newInstance(appId, homeSubCluster); + // If the requestSubClusterId that needs to be filtered in the request + // is inconsistent with the SubClusterId in the data, continue to the next round + if (requestSubClusterId != null && !requestSubClusterId.equals(homeSubCluster)) { + continue; + } + ApplicationHomeSubCluster app = ApplicationHomeSubCluster.newInstance(appId, homeSubCluster); result.add(app); + appCount ++; } } catch (Exception e) { String errMsg = "Cannot get apps: " + e.getMessage(); @@ -272,6 +293,8 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( } long end = clock.getTime(); opDurations.addGetAppsHomeSubClusterDuration(start, end); + + LOG.info("requestSubClusterId = {}, appCount = {}.", requestSubClusterId, appCount); return GetApplicationsHomeSubClusterResponse.newInstance(result); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java index 60549722093db..0df62d26d7ae7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java @@ -17,6 +17,7 @@ package org.apache.hadoop.yarn.server.federation.store.records; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @@ -37,4 +38,33 @@ public static GetApplicationsHomeSubClusterRequest newInstance() { return request; } + @Private + @Unstable + public static GetApplicationsHomeSubClusterRequest + newInstance(SubClusterId subClusterId) { + GetApplicationsHomeSubClusterRequest request = + Records.newRecord(GetApplicationsHomeSubClusterRequest.class); 
+ request.setSubClusterId(subClusterId); + return request; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @InterfaceAudience.Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @param subClusterId the subcluster identifier + */ + @InterfaceAudience.Public + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java index a3c1c1a6bb5a1..1a75044cff3b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java @@ -19,10 +19,13 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos; import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterRequestProtoOrBuilder; import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; import org.apache.hadoop.thirdparty.protobuf.TextFormat; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; /** * Protocol buffer based implementation of @@ -75,4 +78,37 @@ public String toString() { return TextFormat.shortDebugString(getProto()); } + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationsHomeSubClusterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public SubClusterId getSubClusterId() { + GetApplicationsHomeSubClusterRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasSubClusterId()) { + return null; + } + return convertFromProtoFormat(p.getSubClusterId()); + } + + @Override + public void setSubClusterId(SubClusterId subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearSubClusterId(); + return; + } + builder.setSubClusterId(convertToProtoFormat(subClusterId)); + } + + private SubClusterId convertFromProtoFormat(YarnServerFederationProtos.SubClusterIdProto sc) { + return new SubClusterIdPBImpl(sc); + } + + private YarnServerFederationProtos.SubClusterIdProto convertToProtoFormat(SubClusterId sc) { + return ((SubClusterIdPBImpl) sc).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto index 33f5cb3fc14e0..7c23e9a3be85b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto @@ -122,7 +122,7 @@ message GetApplicationHomeSubClusterResponseProto { } message GetApplicationsHomeSubClusterRequestProto { - + optional SubClusterIdProto sub_cluster_id = 1; } message GetApplicationsHomeSubClusterResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java index 296e4846ea4c8..af5f8e61be040 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -22,6 +22,8 @@ import java.util.Calendar; import java.util.List; import java.util.TimeZone; +import java.util.Set; +import java.util.HashSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.LambdaTestUtils; @@ -410,6 +412,89 @@ public void testGetApplicationsHomeSubCluster() throws Exception { Assert.assertTrue(result.getAppsHomeSubClusters().contains(ahsc2)); } + @Test + public void testGetApplicationsHomeSubClusterEmpty() throws Exception { + LambdaTestUtils.intercept(YarnException.class, + "Missing getApplicationsHomeSubCluster request", + () -> stateStore.getApplicationsHomeSubCluster(null)); + } + + @Test + public void testGetApplicationsHomeSubClusterFilter() throws Exception { + // Add ApplicationHomeSC - SC1 + long now = Time.now(); + + Set appHomeSubClusters = new HashSet<>(); + + for (int i = 0; i < 10; i++) { + ApplicationId appId = ApplicationId.newInstance(now, i); + SubClusterId subClusterId = SubClusterId.newInstance("SC1"); + addApplicationHomeSC(appId, subClusterId); + ApplicationHomeSubCluster ahsc = + ApplicationHomeSubCluster.newInstance(appId, subClusterId); + appHomeSubClusters.add(ahsc); + } + + // Add ApplicationHomeSC - SC2 + for (int i = 10; i < 20; i++) { + ApplicationId appId = ApplicationId.newInstance(now, i); + SubClusterId 
subClusterId = SubClusterId.newInstance("SC2"); + addApplicationHomeSC(appId, subClusterId); + } + + GetApplicationsHomeSubClusterRequest getRequest = + GetApplicationsHomeSubClusterRequest.newInstance(); + getRequest.setSubClusterId(SubClusterId.newInstance("SC1")); + + GetApplicationsHomeSubClusterResponse result = + stateStore.getApplicationsHomeSubCluster(getRequest); + Assert.assertNotNull(result); + + List items = result.getAppsHomeSubClusters(); + Assert.assertNotNull(items); + Assert.assertEquals(10, items.size()); + + for (ApplicationHomeSubCluster item : items) { + Assert.assertTrue(appHomeSubClusters.contains(item)); + } + } + + @Test + public void testGetApplicationsHomeSubClusterLimit() throws Exception { + // Add ApplicationHomeSC - SC1 + long now = Time.now(); + + for (int i = 0; i < 50; i++) { + ApplicationId appId = ApplicationId.newInstance(now, i); + SubClusterId subClusterId = SubClusterId.newInstance("SC1"); + addApplicationHomeSC(appId, subClusterId); + } + + GetApplicationsHomeSubClusterRequest getRequest = + GetApplicationsHomeSubClusterRequest.newInstance(); + getRequest.setSubClusterId(SubClusterId.newInstance("SC1")); + GetApplicationsHomeSubClusterResponse result = + stateStore.getApplicationsHomeSubCluster(getRequest); + Assert.assertNotNull(result); + + // Write 50 records, but get 10 records because the maximum number is limited to 10 + List items = result.getAppsHomeSubClusters(); + Assert.assertNotNull(items); + Assert.assertEquals(10, items.size()); + + GetApplicationsHomeSubClusterRequest getRequest1 = + GetApplicationsHomeSubClusterRequest.newInstance(); + getRequest1.setSubClusterId(SubClusterId.newInstance("SC2")); + GetApplicationsHomeSubClusterResponse result1 = + stateStore.getApplicationsHomeSubCluster(getRequest1); + Assert.assertNotNull(result1); + + // SC2 data does not exist, so the number of returned records is 0 + List items1 = result1.getAppsHomeSubClusters(); + Assert.assertNotNull(items1); + Assert.assertEquals(0, items1.size()); + } + @Test public void testUpdateApplicationHomeSubCluster() throws Exception { ApplicationId appId = ApplicationId.newInstance(1, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index c3d0a9e1bbd53..8fcef8d429377 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -22,6 +22,7 @@ import java.sql.SQLException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; import org.slf4j.Logger; @@ -164,11 +165,14 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { + " WHERE applicationId = applicationID_IN; END"; private static final String SP_GETAPPLICATIONSHOMESUBCLUSTER = - "CREATE PROCEDURE sp_getApplicationsHomeSubCluster()" + "CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN 
int, IN homeSubCluster_IN varchar(256))" + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + " DECLARE result CURSOR FOR" + " SELECT applicationId, homeSubCluster" - + " FROM applicationsHomeSubCluster; OPEN result; END"; + + " FROM applicationsHomeSubCluster " + + " WHERE ROWNUM() <= limit_IN AND " + + " CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 " + + " WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END; OPEN result; END"; private static final String SP_DELETEAPPLICATIONHOMESUBCLUSTER = "CREATE PROCEDURE sp_deleteApplicationHomeSubCluster(" @@ -204,6 +208,7 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { @Override public void init(Configuration conf) { try { + conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10); super.init(conf); } catch (YarnException e1) { LOG.error("ERROR: failed to init HSQLDB " + e1.getMessage()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java index c29fc03c5b698..70dda2227d0d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.federation.store.impl; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; /** @@ -29,6 +30,7 @@ public class TestMemoryFederationStateStore @Override protected FederationStateStore createStateStore() { Configuration conf = new Configuration(); + conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10); super.setConf(conf); return new MemoryFederationStateStore(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java index 72c820b0ed029..bd3c5994e8d64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java @@ -52,6 +52,7 @@ protected FederationStateStore createStateStore() { DATABASE_PASSWORD); conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL, DATABASE_URL + System.currentTimeMillis()); + conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10); super.setConf(conf); return new HSQLDBFederationStateStore(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java index 272394b6b285b..e1064be6240bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java @@ -67,6 +67,7 @@ public void before() throws IOException, YarnException { Configuration conf = new YarnConfiguration(); conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString); + conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10); setConf(conf); } catch (Exception e) { LOG.error("Cannot initialize ZooKeeper store", e); From a1629b7b74d2f77035dee07e0a9cf4477ebe6d07 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 3 Sep 2022 00:39:13 -0700 Subject: [PATCH 02/25] YARN-11290. Fix CheckStyle. --- .../src/main/resources/yarn-default.xml | 10 +++ .../impl/MemoryFederationStateStore.java | 44 ++++++------- .../store/impl/SQLFederationStateStore.java | 11 ++-- .../impl/ZookeeperFederationStateStore.java | 61 +++++++++++-------- 4 files changed, 74 insertions(+), 52 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 9d95fd43c0c59..5cdbab4ac6164 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -4963,4 +4963,14 @@ </property> + +<property> + <name>yarn.federation.state-store.max-applications</name> + <value>1000</value> + <description> + The maximum number of applications that the YARN Federation + state store returns for a single query. + Default is 1000. + </description> +</property> </configuration> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index a1c17ce6545c8..9b36d5cc17a98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -266,31 +266,31 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( throw new YarnException("Missing getApplicationsHomeSubCluster request"); } - List result = new ArrayList<>(); - List applicationIdList = - applications.keySet().stream().collect(Collectors.toList()); - - SubClusterId requestSubClusterId = request.getSubClusterId(); - int appCount = 0; - for (int i = 0; i < applicationIdList.size(); i++) { - if (appCount >= maxAppsInStateStore) { - break; - } - ApplicationId applicationId = applicationIdList.get(i); - SubClusterId subClusterId = 
applications.get(applicationId); - // If the requestSubClusterId that needs to be filtered in the request - // is inconsistent with the SubClusterId in the data, continue to the next round - if (requestSubClusterId != null && !requestSubClusterId.equals(subClusterId)){ - continue; - } - result.add(ApplicationHomeSubCluster.newInstance(applicationId, subClusterId)); - appCount++; - } - - LOG.info("requestSubClusterId = {}, appCount = {}.", requestSubClusterId, appCount); + SubClusterId requestSC = request.getSubClusterId(); + List result = applications.keySet().stream() + .map(applicationId -> generateAppHomeSC(applicationId)) + .filter(appHomeSC -> judgeAdd(requestSC, appHomeSC.getHomeSubCluster())) + .limit(maxAppsInStateStore) + .collect(Collectors.toList()); + + LOG.info("filterSubClusterId = {}, appCount = {}.", requestSC, result.size()); return GetApplicationsHomeSubClusterResponse.newInstance(result); } + private ApplicationHomeSubCluster generateAppHomeSC(ApplicationId applicationId) { + SubClusterId subClusterId = applications.get(applicationId); + return ApplicationHomeSubCluster.newInstance(applicationId, subClusterId); + } + + private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { + if (filterSubCluster == null) { + return true; + } else if (filterSubCluster != null && filterSubCluster.equals(homeSubCluster)) { + return true; + } + return false; + } + @Override public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( DeleteApplicationHomeSubClusterRequest request) throws YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index dc2d731e89b8e..aa07180bc6f0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -743,9 +743,10 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( try { cstmt = getCallableStatement(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); cstmt.setInt("limit_IN", maxAppsInStateStore); - String homeSubClusterIN = null;; - if (request.getSubClusterId() != null) { - homeSubClusterIN = request.getSubClusterId().toString(); + String homeSubClusterIN = null; + SubClusterId subClusterId = request.getSubClusterId(); + if (subClusterId != null) { + homeSubClusterIN = subClusterId.toString(); } cstmt.setString("homeSubCluster_IN", homeSubClusterIN); @@ -757,8 +758,8 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( while (rs.next()) { // Extract the output for each tuple - String applicationId = rs.getString(1); - String homeSubCluster = rs.getString(2); + String applicationId = rs.getString("applicationId"); + String homeSubCluster = rs.getString("homeSubCluster"); appsHomeSubClusters.add(ApplicationHomeSubCluster.newInstance( ApplicationId.fromString(applicationId), diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 73d0ba71f6553..6e6cd1ce92b2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -24,6 +24,7 @@ import java.util.Calendar; import java.util.List; import java.util.TimeZone; +import java.util.stream.Collectors; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.conf.Configuration; @@ -265,37 +266,47 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( throw new YarnException("Missing getApplicationsHomeSubCluster request"); } - long start = clock.getTime(); - List result = new ArrayList<>(); - SubClusterId requestSubClusterId = request.getSubClusterId(); - int appCount = 0; try { - List childrens = zkManager.getChildren(appsZNode); - for (String child : childrens) { - if (appCount >= maxAppsInStateStore) { - break; - } - ApplicationId appId = ApplicationId.fromString(child); - SubClusterId homeSubCluster = getApp(appId); - // If the requestSubClusterId that needs to be filtered in the request - // is inconsistent with the SubClusterId in the data, continue to the next round - if (requestSubClusterId != null && !requestSubClusterId.equals(homeSubCluster)) { - continue; - } - ApplicationHomeSubCluster app = - ApplicationHomeSubCluster.newInstance(appId, homeSubCluster); - result.add(app); - appCount ++; - } + long start = clock.getTime(); + SubClusterId requestSC = request.getSubClusterId(); + List children = zkManager.getChildren(appsZNode); + List result = + children.stream().map(child -> generateAppHomeSC(child)) + .filter(appHomeSC -> appHomeSC != null + && filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) + .limit(maxAppsInStateStore) + .collect(Collectors.toList()); + long end = clock.getTime(); + opDurations.addGetAppsHomeSubClusterDuration(start, end); + LOG.info("filterSubClusterId = {}, appCount = {}.", requestSC, result.size()); + return GetApplicationsHomeSubClusterResponse.newInstance(result); } catch (Exception e) { String errMsg = "Cannot get apps: " + e.getMessage(); FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); } - long end = clock.getTime(); - opDurations.addGetAppsHomeSubClusterDuration(start, end); - LOG.info("requestSubClusterId = {}, appCount = {}.", requestSubClusterId, appCount); - return GetApplicationsHomeSubClusterResponse.newInstance(result); + throw new YarnException("Cannot get app by request"); + } + + private ApplicationHomeSubCluster generateAppHomeSC(String appId) { + try { + ApplicationId applicationId = ApplicationId.fromString(appId); + SubClusterId homeSubCluster = getApp(applicationId); + ApplicationHomeSubCluster app = + ApplicationHomeSubCluster.newInstance(applicationId, homeSubCluster); + return app; + } catch (Exception ex) { + // Log and skip znodes that cannot be parsed or read; + // the stream filter above drops the resulting null. + LOG.error("Cannot get homeSubCluster by appId = {}.", appId, ex); + } + return null; + } + + private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { + if 
(filterSubCluster == null) { + return true; + } else if (filterSubCluster != null && filterSubCluster.equals(homeSubCluster)) { + return true; + } + return false; } @Override From 9c499ca83293ef4d8074c5e9aed8b14b8cd0b5ad Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sun, 4 Sep 2022 07:09:54 -0700 Subject: [PATCH 03/25] YARN-11290. Fix CheckStyle. --- .../federation/store/impl/MemoryFederationStateStore.java | 2 +- .../store/impl/ZookeeperFederationStateStore.java | 2 +- .../store/records/GetApplicationsHomeSubClusterRequest.java | 2 +- .../federation/store/impl/HSQLDBFederationStateStore.java | 6 ++++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index 9b36d5cc17a98..d78f6261aeae1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -285,7 +285,7 @@ private ApplicationHomeSubCluster generateAppHomeSC(ApplicationId applicationId) private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { if (filterSubCluster == null) { return true; - } else if (filterSubCluster != null && filterSubCluster.equals(homeSubCluster)) { + } else if (filterSubCluster.equals(homeSubCluster)) { return true; } return false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 6e6cd1ce92b2a..753008660a78b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -303,7 +303,7 @@ private ApplicationHomeSubCluster generateAppHomeSC(String appId) { private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { if (filterSubCluster == null) { return true; - } else if (filterSubCluster != null && filterSubCluster.equals(homeSubCluster)) { + } else if (filterSubCluster.equals(homeSubCluster)) { return true; } return false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java index 0df62d26d7ae7..78b144cd38973 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java @@ -41,7 +41,7 @@ public static GetApplicationsHomeSubClusterRequest newInstance() { @Private @Unstable public static GetApplicationsHomeSubClusterRequest - newInstance(SubClusterId subClusterId) { + newInstance(SubClusterId subClusterId) { GetApplicationsHomeSubClusterRequest request = Records.newRecord(GetApplicationsHomeSubClusterRequest.class); request.setSubClusterId(subClusterId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index 8fcef8d429377..fdc4465ad2096 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -165,14 +165,16 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { + " WHERE applicationId = applicationID_IN; END"; private static final String SP_GETAPPLICATIONSHOMESUBCLUSTER = - "CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubCluster_IN varchar(256))" + "CREATE PROCEDURE sp_getApplicationsHomeSubCluster(" + + "IN limit_IN int, IN homeSubCluster_IN varchar(256))" + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + " DECLARE result CURSOR FOR" + " SELECT applicationId, homeSubCluster" + " FROM applicationsHomeSubCluster " + " WHERE ROWNUM() <= limit_IN AND " + " CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 " - + " WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END; OPEN result; END"; + + " WHEN homeSubCluster_IN IS NOT NULL " + + " THEN homeSubCluster = homeSubCluster_IN END; OPEN result; END"; private static final String SP_DELETEAPPLICATIONHOMESUBCLUSTER = "CREATE PROCEDURE sp_deleteApplicationHomeSubCluster(" From 354b8b3ba130fe90e92cfddaddce8e9ebfa3fa44 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sun, 4 Sep 2022 16:01:34 -0700 Subject: [PATCH 04/25] YARN-11290. Modify the sqlserver stored procedure script. 
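The SQLServer procedure now takes the same limit and home sub-cluster parameters as the MySQL and HSQLDB versions. For illustration, a condensed sketch of the caller side in SQLFederationStateStore, which binds both values before executing the procedure (error handling and metrics elided; parameter names follow the Java caller added in PATCH 01):

    CallableStatement cstmt = getCallableStatement(
        "{call sp_getApplicationsHomeSubCluster(?, ?)}");
    cstmt.setInt("limit_IN", maxAppsInStateStore);
    SubClusterId subClusterId = request.getSubClusterId();
    // A null homeSubCluster_IN disables the sub-cluster filter.
    cstmt.setString("homeSubCluster_IN",
        subClusterId == null ? null : subClusterId.toString());
    ResultSet rs = cstmt.executeQuery();
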
--- .../FederationStateStoreStoreProcs.sql | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index 66d6f0e203558..6a2e8fbd9e272 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -111,12 +111,28 @@ IF OBJECT_ID ( '[sp_getApplicationsHomeSubCluster]', 'P' ) IS NOT NULL GO CREATE PROCEDURE [dbo].[sp_getApplicationsHomeSubCluster] + @limit int, + @homeSubCluster VARCHAR(256) AS BEGIN DECLARE @errorMessage nvarchar(4000) BEGIN TRY - SELECT [applicationId], [homeSubCluster], [createTime] - FROM [dbo].[applicationsHomeSubCluster] + SELECT + [applicationId], + [homeSubCluster], + [createTime] + FROM + (SELECT + [applicationId], + [homeSubCluster], + [createTime], + row_number() over(order by [createTime] desc) as row_num + FROM [dbo].[applicationsHomeSubCluster]) AS t + WHERE row_num < @limit + AND (CASE WHEN @homeSubCluster IS NULL THEN 1 + WHEN @homeSubCluster IS NOT NULL + AND [homeSubCluster] = @homeSubCluster + THEN 1 END) = 1 END TRY BEGIN CATCH From eecc58871e86f20f513c7499777785f628f12367 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Mon, 5 Sep 2022 11:26:36 +0800 Subject: [PATCH 05/25] YARN-11290. Add Mysql sp_getApplicationsHomeSubCluster Script And Fix Junit Test. --- .../MySQL/FederationStateStoreStoredProcs.sql | 8 +++++-- .../TestFederationClientInterceptor.java | 17 +++----------- .../TestableFederationClientInterceptor.java | 23 +++++++++++++++++++ 3 files changed, 32 insertions(+), 16 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index ff755c5f56614..42996ab22cf77 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -122,14 +122,18 @@ BEGIN WHERE applicationId = applicationID_IN; END // -CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int) +CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubCluster_IN varchar(256)) BEGIN SELECT applicationId, homeSubCluster FROM (SELECT applicationId, homeSubCluster, ROW_NUMBER() OVER (ORDER BY applicationId) AS row_num - FROM applicationsHomeSubCluster) WHERE row_num <= limit_IN + FROM applicationsHomeSubCluster) + WHERE row_num <= limit_IN + AND CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 + WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN + END END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java index 93a759bc40eb4..9b14fc7c0fd01 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java @@ -28,10 +28,12 @@ import java.util.Map; import java.util.HashMap; import java.util.Set; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.Arrays; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.MockApps; @@ -132,6 +134,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -1308,13 +1311,6 @@ public void testSubmitReservation() throws Exception { GetNewReservationResponse response = interceptor.getNewReservation(request); Assert.assertNotNull(response); - // allow plan follower to synchronize, manually trigger an assignment - Map mockRMs = interceptor.getMockRMs(); - for (MockRM mockRM : mockRMs.values()) { - ReservationSystem reservationSystem = mockRM.getReservationSystem(); - reservationSystem.synchronizePlan("root.decided", true); - } - // Submit Reservation ReservationId reservationId = response.getReservationId(); ReservationDefinition rDefinition = createReservationDefinition(1024, 1); @@ -1384,13 +1380,6 @@ public void testSubmitReservationMultipleSubmission() throws Exception { GetNewReservationResponse response = interceptor.getNewReservation(request); Assert.assertNotNull(response); - // allow plan follower to synchronize, manually trigger an assignment - Map mockRMs = interceptor.getMockRMs(); - for (MockRM mockRM : mockRMs.values()) { - ReservationSystem reservationSystem = mockRM.getReservationSystem(); - reservationSystem.synchronizePlan("root.decided", true); - } - // First Submit Reservation ReservationId reservationId = response.getReservationId(); ReservationDefinition rDefinition = createReservationDefinition(1024, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java index 7c82476ec4767..2f535a0245cfc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java @@ -28,13 +28,16 @@ import java.util.Map; import java.util.HashMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeoutException; +import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeType; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; @@ -43,6 +46,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan; +import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; @@ -90,6 +95,7 @@ protected ApplicationClientProtocol getClientRMProxyForSubCluster( mockRMs.put(subClusterId, mockRM); } initNodeAttributes(subClusterId, mockRM); + initReservationSystem(mockRM); return mockRM.getClientRMService(); } } @@ -161,4 +167,21 @@ private void initNodeAttributes(SubClusterId subClusterId, MockRM mockRM) { throw new RuntimeException(e); } } + + private void initReservationSystem(MockRM mockRM) throws YarnException { + try { + // Ensure that the reserved resources of the RM#Reservation System are allocated + String planName = "root.decided"; + ReservationSystem reservationSystem = mockRM.getReservationSystem(); + reservationSystem.synchronizePlan(planName, true); + + GenericTestUtils.waitFor(() -> { + Plan plan = reservationSystem.getPlan(planName); + Resource resource = plan.getTotalCapacity(); + return (resource.getMemorySize() > 0 && resource.getVirtualCores() > 0); + }, 100, 2000); + } catch (TimeoutException | InterruptedException e) { + throw new YarnException(e); + } + } } From 499986a60fe4964123963da2b54b817c79f2c2ca Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Mon, 5 Sep 2022 08:10:55 -0700 Subject: [PATCH 06/25] YARN-11290. Fix CheckStyle. 
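Besides the style cleanup, the procedure now numbers rows with a session variable instead of ROW_NUMBER(), so it also runs on MySQL releases without window functions. A sketch of the client-side API this query ultimately serves, mirroring the usage in FederationStateStoreBaseTest ("SC1" is a placeholder sub-cluster id):

    // Ask for at most max-applications apps homed on SC1; a request
    // built without a SubClusterId returns apps from all sub-clusters.
    GetApplicationsHomeSubClusterRequest request =
        GetApplicationsHomeSubClusterRequest.newInstance(
            SubClusterId.newInstance("SC1"));
    GetApplicationsHomeSubClusterResponse response =
        stateStore.getApplicationsHomeSubCluster(request);
    List<ApplicationHomeSubCluster> apps = response.getAppsHomeSubClusters();
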
--- .../MySQL/FederationStateStoreStoredProcs.sql | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index 42996ab22cf77..79aaacfeb3264 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -124,16 +124,15 @@ END // CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubCluster_IN varchar(256)) BEGIN + SET @rownum = 0; SELECT applicationId, homeSubCluster - FROM - (SELECT - applicationId, homeSubCluster, ROW_NUMBER() OVER (ORDER BY applicationId) AS row_num - FROM applicationsHomeSubCluster) - WHERE row_num <= limit_IN - AND CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 - WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END + FROM (SELECT + applicationId, homeSubCluster, (@rownum := @rownum + 1) AS row_num + FROM applicationsHomeSubCluster) AS t + WHERE row_num <= limit_IN + AND (CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 + WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END); END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( From e1ee2a3a5968f91d217c5597537c6ee1b253c21d Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Tue, 6 Sep 2022 08:09:06 +0800 Subject: [PATCH 07/25] YARN-11290. Fix submitReservation First Add --- .../server/router/clientrm/FederationClientInterceptor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index 04452af365f51..33c1c796df428 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -949,7 +949,7 @@ public ReservationSubmissionResponse submitReservation( // Second, determine whether the current ReservationId has a corresponding subCluster. // If it does not exist, add it. If it exists, update it. Boolean exists = existsReservationHomeSubCluster(reservationId); - if (!exists) { + if (!exists || i == 0) { addReservationHomeSubCluster(reservationId, reservationHomeSubCluster); } else { updateReservationHomeSubCluster(subClusterId, reservationId, reservationHomeSubCluster); From 023bb5c6fa2b255957ee493b5fccf602fe5e4cd2 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Tue, 6 Sep 2022 14:22:44 +0800 Subject: [PATCH 08/25] YARN-11290. Fix SqlServer Script. 
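Without an ELSE branch, a non-matching row made the CASE expression evaluate to NULL, so the = 1 comparison rejected it only implicitly; ELSE 0 states the rejection outright. The predicate is the same one the Java stores implement; as a sketch (keepRow is an illustrative name, the stores call it judgeAdd and later filterHomeSubCluster):

    // Equivalent of (CASE ... END) = 1: keep a row when no filter is
    // given, or when its home sub-cluster equals the requested one.
    private boolean keepRow(SubClusterId filter, SubClusterId home) {
      return filter == null || filter.equals(home);
    }
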
--- .../SQLServer/FederationStateStoreStoreProcs.sql | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index 6a2e8fbd9e272..bb79e8409e3e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -130,9 +130,8 @@ AS BEGIN FROM [dbo].[applicationsHomeSubCluster]) AS t WHERE row_num < @limit AND (CASE WHEN @homeSubCluster IS NULL THEN 1 - WHEN @homeSubCluster IS NOT NULL - AND [homeSubCluster] = @homeSubCluster - THEN 1 END) = 1 + WHEN @homeSubCluster IS NOT NULL AND [homeSubCluster] = @homeSubCluster THEN 1 + ELSE 0 END) = 1 END TRY BEGIN CATCH From d309ac58589009db6f6a35a28a000d01b910fa3c Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Tue, 6 Sep 2022 16:50:52 +0800 Subject: [PATCH 09/25] YARN-11290. Fix CheckStyle. --- .../router/clientrm/TestFederationClientInterceptor.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java index 9b14fc7c0fd01..f80881f7b21b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java @@ -28,12 +28,10 @@ import java.util.Map; import java.util.HashMap; import java.util.Set; -import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.Arrays; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.MockApps; @@ -134,7 +132,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; From e96f7961e5198dcf4555fb468c9bf3d4969974b9 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Tue, 6 Sep 2022 23:22:05 +0800 Subject: [PATCH 10/25] YARN-11290. Fix CheckStyle. 
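Two behavior fixes ride along with the style pass: row_num <= @limit repairs an off-by-one that returned at most @limit - 1 rows, and partitioning the numbering by [homeSubCluster] applies the cap per sub-cluster rather than across the whole table. The cap itself is yarn.federation.state-store.max-applications; a minimal sketch of overriding its 1000 default in code, as the unit tests do:

    Configuration conf = new YarnConfiguration();
    // The tests lower the cap to 10 so the limit path is observable.
    conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10);
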
--- .../SQLServer/FederationStateStoreStoreProcs.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index bb79e8409e3e8..11cf04d65ad0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -126,9 +126,9 @@ AS BEGIN [applicationId], [homeSubCluster], [createTime], - row_number() over(order by [createTime] desc) as row_num + row_number() over(partition by [homeSubCluster] order by [createTime] desc) as row_num FROM [dbo].[applicationsHomeSubCluster]) AS t - WHERE row_num < @limit + WHERE row_num <= @limit AND (CASE WHEN @homeSubCluster IS NULL THEN 1 WHEN @homeSubCluster IS NOT NULL AND [homeSubCluster] = @homeSubCluster THEN 1 ELSE 0 END) = 1 From 40f6d999ba00a9881ea94bdb374ec4441fa7fdd6 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Wed, 7 Sep 2022 09:27:54 +0800 Subject: [PATCH 11/25] YARN-11290. Fix CodeStyle. --- .../store/impl/MemoryFederationStateStore.java | 15 ++++++++++++--- .../store/impl/ZookeeperFederationStateStore.java | 5 +++-- .../GetApplicationsHomeSubClusterRequest.java | 3 ++- .../store/impl/FederationStateStoreBaseTest.java | 6 ++++-- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index d78f6261aeae1..afb3e99733571 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -269,7 +269,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( SubClusterId requestSC = request.getSubClusterId(); List result = applications.keySet().stream() .map(applicationId -> generateAppHomeSC(applicationId)) - .filter(appHomeSC -> judgeAdd(requestSC, appHomeSC.getHomeSubCluster())) + .filter(appHomeSC -> filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) .limit(maxAppsInStateStore) .collect(Collectors.toList()); @@ -282,12 +282,21 @@ private ApplicationHomeSubCluster generateAppHomeSC(ApplicationId applicationId) return ApplicationHomeSubCluster.newInstance(applicationId, subClusterId); } - private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { + private boolean filterHomeSubCluster(SubClusterId filterSubCluster, + SubClusterId homeSubCluster) { + + // If the filter condition is empty, + // it means that homeSubCluster needs to be added if (filterSubCluster == null) { return true; - } else if (filterSubCluster.equals(homeSubCluster)) { + } + + // If the filter condition filterSubCluster is not empty, + // and filterSubCluster is equal to homeSubCluster, it needs to be added + if (filterSubCluster.equals(homeSubCluster)) { return true; } + return false; } diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 753008660a78b..295423bf52f0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -272,7 +272,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( List children = zkManager.getChildren(appsZNode); List result = children.stream().map(child -> generateAppHomeSC(child)) - .filter(appHomeSC -> judgeAdd(requestSC, appHomeSC.getHomeSubCluster())) + .filter(appHomeSC -> filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) .limit(maxAppsInStateStore) .collect(Collectors.toList()); long end = clock.getTime(); @@ -300,7 +300,8 @@ private ApplicationHomeSubCluster generateAppHomeSC(String appId) { return null; } - private boolean judgeAdd(SubClusterId filterSubCluster, SubClusterId homeSubCluster) { + private boolean filterHomeSubCluster(SubClusterId filterSubCluster, + SubClusterId homeSubCluster) { if (filterSubCluster == null) { return true; } else if (filterSubCluster.equals(homeSubCluster)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java index 78b144cd38973..f6313df83a793 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.federation.store.records; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @@ -54,7 +55,7 @@ public static GetApplicationsHomeSubClusterRequest newInstance() { * * @return the subcluster identifier */ - @InterfaceAudience.Public + @Public @Unstable public abstract SubClusterId getSubClusterId(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java index af5f8e61be040..5891c952dc759 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -83,6 +83,8 @@ public abstract class FederationStateStoreBaseTest { private static final MonotonicClock CLOCK = new MonotonicClock(); private FederationStateStore stateStore; + private static final int TEN_ROUNDS = 10; + private static final int TWENTY_ROUNDS = 20; protected abstract FederationStateStore createStateStore(); @@ -426,7 +428,7 @@ public void testGetApplicationsHomeSubClusterFilter() throws Exception { Set appHomeSubClusters = new HashSet<>(); - for (int i = 0; i < 10; i++) { + for (int i = 0; i < TEN_ROUNDS; i++) { ApplicationId appId = ApplicationId.newInstance(now, i); SubClusterId subClusterId = SubClusterId.newInstance("SC1"); addApplicationHomeSC(appId, subClusterId); @@ -436,7 +438,7 @@ public void testGetApplicationsHomeSubClusterFilter() throws Exception { } // Add ApplicationHomeSC - SC2 - for (int i = 10; i < 20; i++) { + for (int i = TEN_ROUNDS; i < TWENTY_ROUNDS; i++) { ApplicationId appId = ApplicationId.newInstance(now, i); SubClusterId subClusterId = SubClusterId.newInstance("SC2"); addApplicationHomeSC(appId, subClusterId); From f6df901f99a27a82e70e4b1aefbdc0b7cfdfd11c Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Thu, 8 Sep 2022 23:47:32 +0800 Subject: [PATCH 12/25] YARN-11290. Fix CheckStyle. --- .../MySQL/FederationStateStoreStoredProcs.sql | 28 ++++++++++++----- .../MySQL/FederationStateStoreTables.sql | 3 +- .../FederationStateStoreStoreProcs.sql | 4 ++- .../src/main/resources/yarn-default.xml | 5 ++- .../impl/MemoryFederationStateStore.java | 10 +++--- .../impl/ZookeeperFederationStateStore.java | 9 +++--- .../records/ApplicationHomeSubCluster.java | 31 +++++++++++++++++++ .../pb/ApplicationHomeSubClusterPBImpl.java | 10 ++++++ .../proto/yarn_server_federation_protos.proto | 1 + 9 files changed, 77 insertions(+), 24 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index f5f5500e293c6..24f80df4fb180 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -124,15 +124,27 @@ END // CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubCluster_IN varchar(256)) BEGIN - SET @row_num = 0; SELECT - applicationId, homeSubCluster - FROM (SELECT - applicationId, homeSubCluster, (@rownum := @rownum + 1) AS row_num - FROM applicationsHomeSubCluster) AS t - WHERE row_num <= limit_IN - AND (CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 - WHEN homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END); + t4.applicationId, + t4.createTime, + t4.homeSubCluster, + t4.row_num + FROM ( + SELECT + t2.applicationId, + t2.createTime, + t2.homeSubCluster, + @row_num := @row_num + 1 AS row_num + FROM ( + SELECT + t.applicationId, + t.homeSubCluster, + t.createTime + FROM applicationshomesubcluster AS t + WHERE ( CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END ) + 
ORDER BY t.createTime DESC) AS t2, (SELECT @row_num := 0) AS t3 + ) AS t4 +WHERE t4.row_num < limit_IN; END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql index 6a3188bab6eab..8a0941698e6be 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql @@ -22,7 +22,8 @@ USE FederationStateStore CREATE TABLE applicationsHomeSubCluster( applicationId varchar(64) NOT NULL, - homeSubCluster varchar(256) NULL, + homeSubCluster varchar(256) NOT NULL, + createTime datetime NOT NULL, CONSTRAINT pk_applicationId PRIMARY KEY (applicationId) ); diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index b18b00209b0d6..d3d55a488bdaf 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -126,7 +126,9 @@ AS BEGIN [applicationId], [homeSubCluster], [createTime], - row_number() over(partition by [homeSubCluster] order by [createTime] desc) as row_num + CASE WHEN @homeSubCluster IS NULL THEN row_number() over(order by [createTime] desc) + WHEN @homeSubCluster IS NOT NULL THEN row_number() over(partition by [homeSubCluster] order by [createTime] desc) + END AS row_num FROM [dbo].[applicationsHomeSubCluster]) AS t WHERE row_num <= @limit AND (CASE WHEN @homeSubCluster IS NULL THEN 1 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 5cdbab4ac6164..a97bdcbc76df3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -4967,9 +4967,8 @@ yarn.federation.state-store.max-applications 1000 - yarn federation state-store supports - querying the maximum number of apps - Default is 1000 + Yarn federation state-store supports querying the maximum number of apps. + Default is 1000. 
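Note on the yarn-default.xml entry above: the cap is an ordinary int property, so a deployment can raise or lower it in yarn-site.xml without code changes. A hedged sketch of overriding and reading it back through the stock Configuration API (the value 2000 is an arbitrary example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;

    public final class MaxAppsConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Equivalent to setting the property in yarn-site.xml.
        conf.setInt("yarn.federation.state-store.max-applications", 2000);
        int maxApps =
            conf.getInt("yarn.federation.state-store.max-applications", 1000);
        System.out.println("state-store application query cap: " + maxApps);
      }
    }
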
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index afb3e99733571..ca9a630c871f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -17,16 +17,13 @@ package org.apache.hadoop.yarn.server.federation.store.impl; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.Map.Entry; -import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -269,6 +266,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( SubClusterId requestSC = request.getSubClusterId(); List result = applications.keySet().stream() .map(applicationId -> generateAppHomeSC(applicationId)) + .sorted(Comparator.comparing(ApplicationHomeSubCluster::getCreateTime).reversed()) .filter(appHomeSC -> filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) .limit(maxAppsInStateStore) .collect(Collectors.toList()); @@ -279,7 +277,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( private ApplicationHomeSubCluster generateAppHomeSC(ApplicationId applicationId) { SubClusterId subClusterId = applications.get(applicationId); - return ApplicationHomeSubCluster.newInstance(applicationId, subClusterId); + return ApplicationHomeSubCluster.newInstance(applicationId, Time.now(), subClusterId); } private boolean filterHomeSubCluster(SubClusterId filterSubCluster, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 295423bf52f0d..15b54c24177aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -20,14 +20,12 @@ import static org.apache.hadoop.util.curator.ZKCuratorManager.getNodePath; import java.io.IOException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.TimeZone; +import java.util.*; import java.util.stream.Collectors; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Time; import 
org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -272,6 +270,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( List children = zkManager.getChildren(appsZNode); List result = children.stream().map(child -> generateAppHomeSC(child)) + .sorted(Comparator.comparing(ApplicationHomeSubCluster::getCreateTime).reversed()) .filter(appHomeSC -> filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) .limit(maxAppsInStateStore) .collect(Collectors.toList()); @@ -292,7 +291,7 @@ private ApplicationHomeSubCluster generateAppHomeSC(String appId) { ApplicationId applicationId = ApplicationId.fromString(appId); SubClusterId homeSubCluster = getApp(applicationId); ApplicationHomeSubCluster app = - ApplicationHomeSubCluster.newInstance(applicationId, homeSubCluster); + ApplicationHomeSubCluster.newInstance(applicationId, Time.now(), homeSubCluster); return app; } catch (Exception ex) { LOG.error("get homeSubCluster by appId = {}.", appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java index 5e4c7ccf4ef9e..a9053d7b4496b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java @@ -51,6 +51,18 @@ public static ApplicationHomeSubCluster newInstance(ApplicationId appId, return appMapping; } + @Private + @Unstable + public static ApplicationHomeSubCluster newInstance(ApplicationId appId, long createTime, + SubClusterId homeSubCluster) { + ApplicationHomeSubCluster appMapping = + Records.newRecord(ApplicationHomeSubCluster.class); + appMapping.setApplicationId(appId); + appMapping.setHomeSubCluster(homeSubCluster); + appMapping.setCreateTime(createTime); + return appMapping; + } + /** * Get the {@link ApplicationId} representing the unique identifier of the * application. @@ -91,6 +103,25 @@ public static ApplicationHomeSubCluster newInstance(ApplicationId appId, @Unstable public abstract void setHomeSubCluster(SubClusterId homeSubCluster); + /** + * Get the create time of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract long getCreateTime(); + + /** + * Set the create time of the subcluster. 
+ * + * @param time the last heartbeat time of the subcluster + */ + @Private + @Unstable + public abstract void setCreateTime(long time); + + @Override public boolean equals(Object obj) { if (this == obj) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java index 05b0b62649f44..a72a431430d5b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java @@ -149,6 +149,16 @@ public void setHomeSubCluster(SubClusterId homeSubCluster) { this.homeSubCluster = homeSubCluster; } + @Override + public long getCreateTime() { + return 0; + } + + @Override + public void setCreateTime(long time) { + + } + private SubClusterId convertFromProtoFormat(SubClusterIdProto subClusterId) { return new SubClusterIdPBImpl(subClusterId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto index 7c23e9a3be85b..0c23cca1513be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto @@ -96,6 +96,7 @@ message GetSubClustersInfoResponseProto { message ApplicationHomeSubClusterProto { optional ApplicationIdProto application_id = 1; optional SubClusterIdProto home_sub_cluster = 2; + optional int64 create_time = 3; } message AddApplicationHomeSubClusterRequestProto { From 152300c26052d1893a9ba5e768a64e2765972b59 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 9 Sep 2022 21:12:02 +0800 Subject: [PATCH 13/25] YARN-11290. Modify SQL code. 
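Note on PATCH 12 above: createTime is threaded through the record, the protobuf message (optional int64 create_time = 3) and both non-SQL stores, but the ApplicationHomeSubClusterPBImpl stub still returns 0 and ignores setCreateTime, so the field does not yet survive a protobuf round trip at this point in the series. The memory and ZooKeeper stores use the field for newest-first ordering; a minimal sketch of that comparator over the record type:

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;

    public final class NewestFirstSketch {
      /** Orders application-to-home-sub-cluster mappings, most recent first. */
      static List<ApplicationHomeSubCluster> newestFirst(
          List<ApplicationHomeSubCluster> apps) {
        return apps.stream()
            .sorted(Comparator.comparing(
                ApplicationHomeSubCluster::getCreateTime).reversed())
            .collect(Collectors.toList());
      }
    }
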
--- .../MySQL/FederationStateStoreStoredProcs.sql | 37 +++++++++---------- .../store/impl/SQLFederationStateStore.java | 6 +-- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index 24f80df4fb180..e541f9a4aa354 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -125,26 +125,23 @@ END // CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubCluster_IN varchar(256)) BEGIN SELECT - t4.applicationId, - t4.createTime, - t4.homeSubCluster, - t4.row_num - FROM ( - SELECT - t2.applicationId, - t2.createTime, - t2.homeSubCluster, - @row_num := @row_num + 1 AS row_num - FROM ( - SELECT - t.applicationId, - t.homeSubCluster, - t.createTime - FROM applicationshomesubcluster AS t - WHERE ( CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END ) - ORDER BY t.createTime DESC) AS t2, (SELECT @row_num := 0) AS t3 - ) AS t4 -WHERE t4.row_num < limit_IN; + applicationId, + homeSubCluster, + createTime + FROM + (SELECT + applicationId, + homeSubCluster, + createTime, + @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, + @home_sc := homeSubCluster + FROM applicationshomesubcluster + ORDER BY createTime DESC + ) ranked + WHERE app_rank <= limit_IN + AND (CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 + WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN + END); END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index 1dbf14df7e4b5..48e6a50587a66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -774,7 +774,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( rs = cstmt.executeQuery(); long stopTime = clock.getTime(); - while (rs.next()) { + while (rs.next() && appsHomeSubClusters.size() <= maxAppsInStateStore) { // Extract the output for each tuple String applicationId = rs.getString("applicationId"); @@ -796,8 +796,8 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( // Return to the pool the CallableStatement FederationStateStoreUtils.returnToPool(LOG, cstmt, null, rs); } - return GetApplicationsHomeSubClusterResponse - .newInstance(appsHomeSubClusters); + + return GetApplicationsHomeSubClusterResponse.newInstance(appsHomeSubClusters); } @Override From c7620afccf192071761dbdbbfc97a5564649b8ef Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 9 Sep 2022 21:14:31 +0800 Subject: [PATCH 14/25] YARN-11290. Modify SQL code. 
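Note on PATCH 13 above: the MySQL text still qualifies the procedure parameter as t.homeSubCluster_IN, although homeSubCluster_IN is an argument rather than a column of the aliased table; later patches in the series rework this block. On the Java side, the guard appsHomeSubClusters.size() <= maxAppsInStateStore lets one row beyond the cap into the list. A sketch of the drain loop with a strict bound (the record construction shown here is illustrative, using only factory methods visible in these patches):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    public final class ResultSetDrainSketch {
      /** Collects at most maxApps mappings from the procedure's result set. */
      static List<ApplicationHomeSubCluster> drain(ResultSet rs, int maxApps)
          throws SQLException {
        List<ApplicationHomeSubCluster> apps = new ArrayList<>();
        // Strict bound: '<' stops at exactly maxApps rows, where the
        // patch's '<=' would admit one extra.
        while (rs.next() && apps.size() < maxApps) {
          ApplicationId appId = ApplicationId.fromString(rs.getString("applicationId"));
          SubClusterId home = SubClusterId.newInstance(rs.getString("homeSubCluster"));
          apps.add(ApplicationHomeSubCluster.newInstance(appId, home));
        }
        return apps;
      }
    }
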
--- .../SQLServer/FederationStateStoreStoreProcs.sql | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index d3d55a488bdaf..fb807028d7cb0 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -126,9 +126,7 @@ AS BEGIN [applicationId], [homeSubCluster], [createTime], - CASE WHEN @homeSubCluster IS NULL THEN row_number() over(order by [createTime] desc) - WHEN @homeSubCluster IS NOT NULL THEN row_number() over(partition by [homeSubCluster] order by [createTime] desc) - END AS row_num + row_number() over(partition by [homeSubCluster] order by [createTime] desc) AS row_num FROM [dbo].[applicationsHomeSubCluster]) AS t WHERE row_num <= @limit AND (CASE WHEN @homeSubCluster IS NULL THEN 1 From a536e30b188accfc6024ae18c4fa5a4707a7cd96 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Mon, 12 Sep 2022 16:32:17 +0800 Subject: [PATCH 15/25] YARN-11290. Improve the code. --- .../MySQL/FederationStateStoreStoredProcs.sql | 20 +++++++++---------- .../impl/MemoryFederationStateStore.java | 7 ++++++- .../impl/ZookeeperFederationStateStore.java | 6 +++++- .../GetApplicationsHomeSubClusterRequest.java | 3 +-- .../impl/HSQLDBFederationStateStore.java | 6 ++++-- .../TestableFederationClientInterceptor.java | 2 +- 6 files changed, 27 insertions(+), 17 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index e541f9a4aa354..2cb00da96f6d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -126,22 +126,22 @@ CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubClu BEGIN SELECT applicationId, - homeSubCluster, - createTime + homeSubCluster, + createTime FROM (SELECT applicationId, - homeSubCluster, - createTime, - @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, - @home_sc := homeSubCluster - FROM applicationshomesubcluster - ORDER BY createTime DESC + homeSubCluster, + createTime, + @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, + @home_sc := homeSubCluster + FROM applicationshomesubcluster + ORDER BY createTime DESC ) ranked WHERE app_rank <= limit_IN AND (CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 - WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN - END); + WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN + END); END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index ca9a630c871f3..e42cb8ebec204 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -17,10 +17,15 @@ package org.apache.hadoop.yarn.server.federation.store.impl; -import java.util.*; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; +import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; +import java.util.Comparator; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 15b54c24177aa..4a2adaef92f99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -20,7 +20,11 @@ import static org.apache.hadoop.util.curator.ZKCuratorManager.getNodePath; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.TimeZone; +import java.util.Comparator; import java.util.stream.Collectors; import org.apache.hadoop.classification.VisibleForTesting; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java index f6313df83a793..06b6987dcbafb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java @@ -17,7 +17,6 @@ package org.apache.hadoop.yarn.server.federation.store.records; -import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -65,7 +64,7 @@ public static GetApplicationsHomeSubClusterRequest newInstance() { * * @param subClusterId the subcluster identifier */ - @InterfaceAudience.Public + @Public @Unstable public abstract void setSubClusterId(SubClusterId subClusterId); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index 39b235db4264f..e15df3e489a54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -42,6 +42,7 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { " CREATE TABLE applicationsHomeSubCluster (" + " applicationId varchar(64) NOT NULL," + " homeSubCluster varchar(256) NOT NULL," + + " createTime datetime NOT NULL," + " CONSTRAINT pk_applicationId PRIMARY KEY (applicationId))"; private static final String TABLE_MEMBERSHIP = @@ -175,12 +176,13 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { + "IN limit_IN int, IN homeSubCluster_IN varchar(256))" + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + " DECLARE result CURSOR FOR" - + " SELECT applicationId, homeSubCluster" + + " SELECT applicationId, homeSubCluster, createTime" + " FROM applicationsHomeSubCluster " + " WHERE ROWNUM() <= limit_IN AND " + " CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 " + " WHEN homeSubCluster_IN IS NOT NULL " - + " THEN homeSubCluster = homeSubCluster_IN END; OPEN result; END"; + + " THEN homeSubCluster = homeSubCluster_IN END ORDER BY createTime desc; " + + " OPEN result; END"; private static final String SP_DELETEAPPLICATIONHOMESUBCLUSTER = "CREATE PROCEDURE sp_deleteApplicationHomeSubCluster(" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java index 6a4b32ba20401..8279899e387ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java @@ -189,7 +189,7 @@ private void initReservationSystem(MockRM mockRM) throws YarnException { throw new YarnException(e); } } - + @Override public void shutdown() { if (mockRMs != null && !mockRMs.isEmpty()) { From 1d000209b4845a7bae18ddcda5ca5ff147454fc2 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Mon, 12 Sep 2022 16:35:21 +0800 Subject: [PATCH 16/25] YARN-11290. Fix CheckStyle. 
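Note on PATCH 15 above: beyond whitespace and import cleanups, the reworked HSQLDB procedure now also returns createTime and orders by it, keeping the test-only store aligned with the production scripts. For callers, the sub-cluster filter introduced in this series stays optional; a hedged sketch of building the request both ways, using only the methods visible in these patches:

    import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    public final class RequestSketch {
      static void buildRequests() {
        // No filter: the store returns apps from every sub-cluster, capped.
        GetApplicationsHomeSubClusterRequest all =
            GetApplicationsHomeSubClusterRequest.newInstance();

        // Filtered: only apps whose home sub-cluster is SC-1.
        GetApplicationsHomeSubClusterRequest filtered =
            GetApplicationsHomeSubClusterRequest.newInstance();
        filtered.setSubClusterId(SubClusterId.newInstance("SC-1"));
      }
    }
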
--- .../MySQL/FederationStateStoreStoredProcs.sql | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index 2cb00da96f6d0..2880e86194dfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -126,21 +126,21 @@ CREATE PROCEDURE sp_getApplicationsHomeSubCluster(IN limit_IN int, IN homeSubClu BEGIN SELECT applicationId, - homeSubCluster, - createTime + homeSubCluster, + createTime FROM (SELECT - applicationId, - homeSubCluster, - createTime, - @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, - @home_sc := homeSubCluster + applicationId, + homeSubCluster, + createTime, + @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, + @home_sc := homeSubCluster FROM applicationshomesubcluster ORDER BY createTime DESC ) ranked WHERE app_rank <= limit_IN AND (CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 - WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN + WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN END); END // From 411d4d2af5b564ecaa90c3688ccdfcf7a6d2047a Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Tue, 13 Sep 2022 14:13:18 +0800 Subject: [PATCH 17/25] YARN-11290. Improve Mysql Script. --- .../MySQL/FederationStateStoreStoredProcs.sql | 23 ++++++++----------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index 2880e86194dfd..9d2d3f78d1d33 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -128,20 +128,15 @@ BEGIN applicationId, homeSubCluster, createTime - FROM - (SELECT - applicationId, - homeSubCluster, - createTime, - @app_rank := IF(@home_sc = homeSubCluster, @app_rank + 1, 1) AS app_rank, - @home_sc := homeSubCluster - FROM applicationshomesubcluster - ORDER BY createTime DESC - ) ranked - WHERE app_rank <= limit_IN - AND (CASE WHEN t.homeSubCluster_IN IS NULL THEN 1 = 1 - WHEN t.homeSubCluster_IN IS NOT NULL THEN homeSubCluster = homeSubCluster_IN - END); + FROM + (SELECT + *, + @rownum := 0, + IF(homeSubCluster_IN = '', 1, (homeSubCluster = homeSubCluster_IN)) AS filter_result + FROM applicationshomesubcluster + ORDER BY createTime DESC) AS app_home_sc + WHERE filter_result = 1 + AND (@rownum := @rownum + 1) <= limit_IN; END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( From a12ea7e34e1260902736341242523cb44d31a303 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Wed, 14 Sep 2022 03:18:36 +0800 Subject: [PATCH 18/25] YARN-11290. Fix CheckStyle. 
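Note on PATCH 17 above: the procedure now treats the empty string, not SQL NULL, as the "no filter" sentinel (IF(homeSubCluster_IN = '', 1, ...)), which sidesteps three-valued NULL comparisons entirely; PATCH 20 later aligns the Java caller by defaulting the argument to StringUtils.EMPTY. A sketch of the caller-side mapping, binding the parameter by name as the surrounding code does:

    import java.sql.CallableStatement;
    import java.sql.SQLException;

    import org.apache.commons.lang3.StringUtils;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    public final class FilterArgSketch {
      /** Maps a missing filter to the procedure's empty-string sentinel. */
      static void bindFilter(CallableStatement cstmt, SubClusterId subClusterId)
          throws SQLException {
        String homeSubClusterIN =
            (subClusterId == null) ? StringUtils.EMPTY : subClusterId.toString();
        cstmt.setString("homeSubCluster_IN", homeSubClusterIN);
      }
    }
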
--- .../FederationStateStoreStoreProcs.sql | 40 ++++++++++++------- .../records/ApplicationHomeSubCluster.java | 3 +- .../impl/FederationStateStoreBaseTest.java | 8 ++-- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index fb807028d7cb0..32b919597094a 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -117,21 +117,31 @@ AS BEGIN DECLARE @errorMessage nvarchar(4000) BEGIN TRY - SELECT - [applicationId], - [homeSubCluster], - [createTime] - FROM - (SELECT - [applicationId], - [homeSubCluster], - [createTime], - row_number() over(partition by [homeSubCluster] order by [createTime] desc) AS row_num - FROM [dbo].[applicationsHomeSubCluster]) AS t - WHERE row_num <= @limit - AND (CASE WHEN @homeSubCluster IS NULL THEN 1 - WHEN @homeSubCluster IS NOT NULL AND [homeSubCluster] = @homeSubCluster THEN 1 - ELSE 0 END) = 1 + IF @homeSubCluster = '' + SELECT + [applicationId], + [homeSubCluster], + [createTime] + FROM(SELECT + [applicationId], + [homeSubCluster], + [createTime], + row_number() over(order by [createTime] desc) AS app_rank + FROM [dbo].[applicationsHomeSubCluster]) AS t + WHERE app_rank <= @limit; + ELSE + SELECT + [applicationId], + [homeSubCluster], + [createTime] + FROM(SELECT + [applicationId], + [homeSubCluster], + [createTime], + row_number() over(partition by [homeSubCluster] order by [createTime] desc) AS app_rank + FROM [dbo].[applicationsHomeSubCluster] + WHERE [homeSubCluster] = @homeSubCluster) AS t + WHERE app_rank <= @limit; END TRY BEGIN CATCH diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java index a9053d7b4496b..898e11f182015 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java @@ -55,8 +55,7 @@ public static ApplicationHomeSubCluster newInstance(ApplicationId appId, @Unstable public static ApplicationHomeSubCluster newInstance(ApplicationId appId, long createTime, SubClusterId homeSubCluster) { - ApplicationHomeSubCluster appMapping = - Records.newRecord(ApplicationHomeSubCluster.class); + ApplicationHomeSubCluster appMapping = Records.newRecord(ApplicationHomeSubCluster.class); appMapping.setApplicationId(appId); appMapping.setHomeSubCluster(homeSubCluster); appMapping.setCreateTime(createTime); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java index 5891c952dc759..3759e349bc178 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -83,8 +83,8 @@ public abstract class FederationStateStoreBaseTest { private static final MonotonicClock CLOCK = new MonotonicClock(); private FederationStateStore stateStore; - private static final int TEN_ROUNDS = 10; - private static final int TWENTY_ROUNDS = 20; + private static final int NUM_APPS_10 = 10; + private static final int NUM_APPS_20 = 20; protected abstract FederationStateStore createStateStore(); @@ -428,7 +428,7 @@ public void testGetApplicationsHomeSubClusterFilter() throws Exception { Set appHomeSubClusters = new HashSet<>(); - for (int i = 0; i < TEN_ROUNDS; i++) { + for (int i = 0; i < NUM_APPS_10; i++) { ApplicationId appId = ApplicationId.newInstance(now, i); SubClusterId subClusterId = SubClusterId.newInstance("SC1"); addApplicationHomeSC(appId, subClusterId); @@ -438,7 +438,7 @@ public void testGetApplicationsHomeSubClusterFilter() throws Exception { } // Add ApplicationHomeSC - SC2 - for (int i = TEN_ROUNDS; i < TWENTY_ROUNDS; i++) { + for (int i = 10; i < NUM_APPS_20; i++) { ApplicationId appId = ApplicationId.newInstance(now, i); SubClusterId subClusterId = SubClusterId.newInstance("SC2"); addApplicationHomeSC(appId, subClusterId); From 923622cb04ef6cc1f8c55c91947c53711378c73b Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Wed, 14 Sep 2022 09:48:23 +0800 Subject: [PATCH 19/25] YARN-11290. Improve Code Style. 
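Note on PATCH 18 above: the SQL Server procedure now branches explicitly, ranking across all rows when no filter is given and per homeSubCluster partition otherwise; PATCH 19 then collapses the two branches back into one query. The partitioned variant is a per-group top-N; a self-contained stream sketch of the same shape (the AppRow type is illustrative, not a class from the patch):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public final class TopNPerSubClusterSketch {
      static final class AppRow {
        final String appId;
        final String homeSubCluster;
        final long createTime;
        AppRow(String appId, String homeSubCluster, long createTime) {
          this.appId = appId;
          this.homeSubCluster = homeSubCluster;
          this.createTime = createTime;
        }
      }

      /** Keeps the 'limit' newest rows within each home sub-cluster. */
      static Map<String, List<AppRow>> topNPerSubCluster(List<AppRow> rows, int limit) {
        return rows.stream().collect(Collectors.groupingBy(
            r -> r.homeSubCluster,
            Collectors.collectingAndThen(Collectors.toList(), group -> {
              group.sort(Comparator.comparingLong((AppRow r) -> r.createTime).reversed());
              return new ArrayList<>(group.subList(0, Math.min(limit, group.size())));
            })));
      }
    }
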
--- .../MySQL/FederationStateStoreStoredProcs.sql | 16 ++++---- .../FederationStateStoreStoreProcs.sql | 37 ++++++------------- .../impl/HSQLDBFederationStateStore.java | 5 ++- 3 files changed, 23 insertions(+), 35 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql index 9d2d3f78d1d33..6461cf2bd75e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -128,15 +128,15 @@ BEGIN applicationId, homeSubCluster, createTime - FROM - (SELECT - *, - @rownum := 0, - IF(homeSubCluster_IN = '', 1, (homeSubCluster = homeSubCluster_IN)) AS filter_result + FROM (SELECT + applicationId, + homeSubCluster, + createTime, + @rownum := 0 FROM applicationshomesubcluster - ORDER BY createTime DESC) AS app_home_sc - WHERE filter_result = 1 - AND (@rownum := @rownum + 1) <= limit_IN; + ORDER BY createTime DESC) AS applicationshomesubcluster + WHERE (homeSubCluster_IN = '' OR homeSubCluster = homeSubCluster_IN) + AND (@rownum := @rownum + 1) <= limit_IN; END // CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index 32b919597094a..a7cc9390c0267 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -117,31 +117,18 @@ AS BEGIN DECLARE @errorMessage nvarchar(4000) BEGIN TRY - IF @homeSubCluster = '' - SELECT - [applicationId], - [homeSubCluster], - [createTime] - FROM(SELECT - [applicationId], - [homeSubCluster], - [createTime], - row_number() over(order by [createTime] desc) AS app_rank - FROM [dbo].[applicationsHomeSubCluster]) AS t - WHERE app_rank <= @limit; - ELSE - SELECT - [applicationId], - [homeSubCluster], - [createTime] - FROM(SELECT - [applicationId], - [homeSubCluster], - [createTime], - row_number() over(partition by [homeSubCluster] order by [createTime] desc) AS app_rank - FROM [dbo].[applicationsHomeSubCluster] - WHERE [homeSubCluster] = @homeSubCluster) AS t - WHERE app_rank <= @limit; + SELECT + [applicationId], + [homeSubCluster], + [createTime] + FROM(SELECT + [applicationId], + [homeSubCluster], + [createTime], + row_number() over(order by [createTime] desc) AS app_rank + FROM [dbo].[applicationsHomeSubCluster] + WHERE [homeSubCluster] = @homeSubCluster OR @homeSubCluster = '') AS t + WHERE app_rank <= @limit; END TRY BEGIN CATCH diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index e15df3e489a54..fb2c91a276085 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -142,8 +142,9 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { + " OUT storedHomeSubCluster_OUT varchar(256), OUT rowCount_OUT int)" + " MODIFIES SQL DATA BEGIN ATOMIC" + " INSERT INTO applicationsHomeSubCluster " - + " (applicationId,homeSubCluster) " - + " (SELECT applicationId_IN, homeSubCluster_IN" + + " (applicationId,homeSubCluster,createTime) " + + " (SELECT applicationId_IN, homeSubCluster_IN, " + + " NOW() AT TIME ZONE INTERVAL '0:00' HOUR TO MINUTE" + " FROM applicationsHomeSubCluster" + " WHERE applicationId = applicationId_IN" + " HAVING COUNT(*) = 0 );" From 490cf3c017704b38b5a193df7159d09164096507 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Wed, 14 Sep 2022 19:18:17 +0800 Subject: [PATCH 20/25] YARN-11290. Fix CheckStyle. --- .../federation/store/impl/SQLFederationStateStore.java | 2 +- .../federation/store/impl/HSQLDBFederationStateStore.java | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index 48e6a50587a66..2a8ad7b0fceac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -762,7 +762,7 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( try { cstmt = getCallableStatement(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); cstmt.setInt("limit_IN", maxAppsInStateStore); - String homeSubClusterIN = null; + String homeSubClusterIN = StringUtils.EMPTY; SubClusterId subClusterId = request.getSubClusterId(); if (subClusterId != null) { homeSubClusterIN = subClusterId.toString(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java index fb2c91a276085..828f13eabe101 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -180,9 +180,8 @@ public class HSQLDBFederationStateStore extends SQLFederationStateStore { + " SELECT applicationId, homeSubCluster, createTime" + " FROM applicationsHomeSubCluster " + " WHERE ROWNUM() <= limit_IN AND " - + " CASE WHEN homeSubCluster_IN IS NULL THEN 1 = 1 " - + " WHEN homeSubCluster_IN IS NOT NULL " - + " THEN homeSubCluster = homeSubCluster_IN END ORDER BY createTime desc; " + + " (homeSubCluster_IN = '' OR homeSubCluster = 
homeSubCluster_IN) " + + " ORDER BY createTime desc; " + " OPEN result; END"; private static final String SP_DELETEAPPLICATIONHOMESUBCLUSTER = From 32042dce9b87ee5c582cd5243a7addd916060813 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Thu, 15 Sep 2022 09:48:35 +0800 Subject: [PATCH 21/25] YARN-11290. Fix CodeStyle. --- .../impl/MemoryFederationStateStore.java | 20 ++------------ .../impl/ZookeeperFederationStateStore.java | 16 +++-------- .../utils/FederationStateStoreUtils.java | 27 +++++++++++++++++++ 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java index e42cb8ebec204..54c90bfbd7584 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -85,6 +85,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils.filterHomeSubCluster; + /** * In-memory implementation of {@link FederationStateStore}. */ @@ -285,24 +287,6 @@ private ApplicationHomeSubCluster generateAppHomeSC(ApplicationId applicationId) return ApplicationHomeSubCluster.newInstance(applicationId, Time.now(), subClusterId); } - private boolean filterHomeSubCluster(SubClusterId filterSubCluster, - SubClusterId homeSubCluster) { - - // If the filter condition is empty, - // it means that homeSubCluster needs to be added - if (filterSubCluster == null) { - return true; - } - - // If the filter condition filterSubCluster is not empty, - // and filterSubCluster is equal to homeSubCluster, it needs to be added - if (filterSubCluster.equals(homeSubCluster)) { - return true; - } - - return false; - } - @Override public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( DeleteApplicationHomeSubClusterRequest request) throws YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java index 4a2adaef92f99..72cb5e92e0187 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -98,6 +98,8 @@ import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; +import static org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils.filterHomeSubCluster; + /** * ZooKeeper implementation of {@link FederationStateStore}. 
* @@ -272,8 +274,8 @@ public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( long start = clock.getTime(); SubClusterId requestSC = request.getSubClusterId(); List children = zkManager.getChildren(appsZNode); - List result = - children.stream().map(child -> generateAppHomeSC(child)) + List result = children.stream() + .map(child -> generateAppHomeSC(child)) .sorted(Comparator.comparing(ApplicationHomeSubCluster::getCreateTime).reversed()) .filter(appHomeSC -> filterHomeSubCluster(requestSC, appHomeSC.getHomeSubCluster())) .limit(maxAppsInStateStore) @@ -303,16 +305,6 @@ private ApplicationHomeSubCluster generateAppHomeSC(String appId) { return null; } - private boolean filterHomeSubCluster(SubClusterId filterSubCluster, - SubClusterId homeSubCluster) { - if (filterSubCluster == null) { - return true; - } else if (filterSubCluster.equals(homeSubCluster)) { - return true; - } - return false; - } - @Override public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java index 7dc53f8e0acfc..52ef725fb2b94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException; import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -279,4 +280,30 @@ public static void setPassword(HikariDataSource dataSource, String password) { LOG.debug("NULL Credentials specified for Store connection, so ignoring"); } } + + /** + * Filter HomeSubCluster based on Filter SubCluster. + * + * @param filterSubCluster filter query conditions + * @param homeSubCluster homeSubCluster + * @return return true, if match filter conditions, + * return false, if not match filter conditions. + */ + public static boolean filterHomeSubCluster(SubClusterId filterSubCluster, + SubClusterId homeSubCluster) { + + // If the filter condition is empty, + // it means that homeSubCluster needs to be added + if (filterSubCluster == null) { + return true; + } + + // If the filter condition filterSubCluster is not empty, + // and filterSubCluster is equal to homeSubCluster, it needs to be added + if (filterSubCluster.equals(homeSubCluster)) { + return true; + } + + return false; + } } From bd195f2eb021ed271cff0cd46bafe339d07e4a2e Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 17 Sep 2022 20:22:53 +0800 Subject: [PATCH 22/25] YARN-11290. Fix CheckStyle. 
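Note on PATCH 21 above: the duplicated predicate moves into FederationStateStoreUtils, so the memory and ZooKeeper stores share one statically imported filterHomeSubCluster. Its contract, shown as a quick usage sketch over the shipped method:

    import static org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils.filterHomeSubCluster;

    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    public final class FilterContractSketch {
      static void demo() {
        SubClusterId home = SubClusterId.newInstance("SC-1");
        boolean a = filterHomeSubCluster(null, home);                             // true: no filter set
        boolean b = filterHomeSubCluster(SubClusterId.newInstance("SC-1"), home); // true: filter matches
        boolean c = filterHomeSubCluster(SubClusterId.newInstance("SC-2"), home); // false: filter differs
      }
    }
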
--- .../SQLServer/FederationStateStoreStoreProcs.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql index a7cc9390c0267..bf83e473b08ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -127,7 +127,7 @@ AS BEGIN [createTime], row_number() over(order by [createTime] desc) AS app_rank FROM [dbo].[applicationsHomeSubCluster] - WHERE [homeSubCluster] = @homeSubCluster OR @homeSubCluster = '') AS t + WHERE [homeSubCluster] = @homeSubCluster OR @homeSubCluster = '') AS applicationsHomeSubCluster WHERE app_rank <= @limit; END TRY From b948fab6176e00c2cbfd39f948547d3e2674953e Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 23 Sep 2022 10:49:14 +0800 Subject: [PATCH 23/25] YARN-11290. Merge Trunk Branch. --- LICENSE-binary | 36 +- .../apache/hadoop/fs/LocalDirAllocator.java | 26 +- .../apache/hadoop/fs/RawLocalFileSystem.java | 5 + .../prefetch/ExecutorServiceFuturePool.java | 20 +- .../org/apache/hadoop/http/HttpServer2.java | 6 +- .../apache/hadoop/io/compress/BZip2Codec.java | 28 +- .../java/org/apache/hadoop/ipc/Server.java | 15 + .../apache/hadoop/ipc/metrics/RpcMetrics.java | 23 + .../metrics2/impl/MetricsRecordImpl.java | 4 +- .../hadoop/metrics2/sink/GraphiteSink.java | 269 +++++------ .../hadoop/metrics2/sink/StatsDSink.java | 6 +- .../ZKDelegationTokenSecretManager.java | 7 +- .../util/concurrent/HadoopExecutors.java | 5 +- .../hadoop/fs/TestLocalDirAllocator.java | 17 + .../apache/hadoop/fs/TestLocalFileSystem.java | 4 +- .../apache/hadoop/http/TestHttpServer.java | 17 +- .../hadoop/io/compress/TestBZip2Codec.java | 203 +++++++++ .../java/org/apache/hadoop/ipc/TestIPC.java | 7 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 24 +- .../metrics2/impl/TestGraphiteMetrics.java | 215 --------- .../metrics2/sink/TestGraphiteMetrics.java | 219 +++++++++ .../{impl => sink}/TestStatsDMetrics.java | 14 +- .../java/org/apache/hadoop/net/TestDNS.java | 5 +- .../TestZKDelegationTokenSecretManager.java | 37 ++ .../hadoop/portmap/RpcProgramPortmap.java | 7 +- .../apache/hadoop/portmap/TestPortmap.java | 7 +- .../metrics/FederationRPCMBean.java | 4 + .../metrics/FederationRPCMetrics.java | 24 +- .../FederationRPCPerformanceMonitor.java | 6 +- .../server/federation/metrics/RBFMetrics.java | 2 +- .../resolver/ActiveNamenodeResolver.java | 30 +- .../resolver/MembershipNamenodeResolver.java | 99 ++-- .../federation/router/ConnectionPool.java | 2 +- .../federation/router/RBFConfigKeys.java | 6 + .../router/RouterClientProtocol.java | 5 +- .../federation/router/RouterRpcClient.java | 150 +++++-- .../federation/router/RouterRpcMonitor.java | 4 +- .../federation/router/RouterRpcServer.java | 2 +- .../src/main/resources/hdfs-rbf-default.xml | 19 + .../src/site/markdown/HDFSRouterFederation.md | 1 - .../federation/FederationTestUtils.java | 4 +- .../federation/MiniRouterDFSCluster.java | 22 + .../hdfs/server/federation/MockResolver.java | 50 ++- ...RouterRefreshFairnessPolicyController.java | 3 +- .../resolver/TestNamenodeResolver.java | 14 +- .../router/TestObserverWithRouter.java | 425 ++++++++++++++++++ .../router/TestRouterNamenodeHeartbeat.java | 6 +- .../router/TestRouterNamenodeMonitoring.java | 2 +- 
.../router/TestRouterNamenodeWebScheme.java | 2 +- .../router/TestRouterRPCClientRetries.java | 2 +- .../qjournal/client/QuorumJournalManager.java | 4 - .../hadoop/hdfs/qjournal/server/Journal.java | 14 +- .../qjournal/server/JournalNodeRpcServer.java | 2 + .../server/blockmanagement/BlockManager.java | 20 + .../blockmanagement/DatanodeManager.java | 2 +- .../PendingReconstructionBlocks.java | 8 + .../hadoop/hdfs/server/namenode/NameNode.java | 16 +- .../src/main/resources/hdfs-default.xml | 8 +- .../hadoop-hdfs/src/site/markdown/ViewFs.md | 26 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 2 + .../hadoop/hdfs/TestDatanodeReport.java | 6 +- .../client/TestQuorumJournalManager.java | 37 ++ .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../hdfs/server/namenode/TestHostsFiles.java | 35 ++ .../TestRefreshNamenodeReplicationConfig.java | 16 +- ...TestUpgradeDomainBlockPlacementPolicy.java | 6 +- .../server/namenode/ha/TestObserverNode.java | 44 ++ .../hadoop/hdfs/tools/TestDFSAdmin.java | 4 +- .../app/launcher/ContainerLauncherImpl.java | 21 +- .../launcher/TestContainerLauncherImpl.java | 28 +- .../input/BaseTestLineRecordReaderBZip2.java | 133 +++++- hadoop-project/pom.xml | 14 +- .../dev-support/findbugs-exclude.xml | 4 + hadoop-tools/hadoop-aws/pom.xml | 24 + .../apache/hadoop/fs/s3a/S3AFileSystem.java | 10 +- .../site/markdown/tools/hadoop-aws/testing.md | 14 + .../apache/hadoop/fs/s3a/S3ATestUtils.java | 4 + .../MySQL/FederationStateStoreTables.sql | 2 +- ...ql => FederationStateStoreStoredProcs.sql} | 0 .../SQLServer/FederationStateStoreTables.sql | 4 +- .../hadoop/yarn/conf/YarnConfiguration.java | 7 + .../hadoop-yarn/hadoop-yarn-common/pom.xml | 2 +- .../hadoop/yarn/webapp/view/JQueryUI.java | 2 +- .../jquery/jquery-ui-1.13.1.custom.min.js | 6 - .../jquery/jquery-ui-1.13.2.custom.min.js | 6 + .../src/main/resources/yarn-default.xml | 9 + .../hadoop-yarn-server-common/pom.xml | 19 + .../FederationDelegationTokenStateStore.java | 69 +++ .../store/FederationStateStore.java | 3 +- .../store/impl/SQLFederationStateStore.java | 28 +- .../impl/ZookeeperFederationStateStore.java | 21 + .../store/records/RouterMasterKey.java | 133 ++++++ .../store/records/RouterMasterKeyRequest.java | 43 ++ .../records/RouterMasterKeyResponse.java | 44 ++ .../records/RouterRMDTSecretManagerState.java | 52 +++ .../impl/pb/RouterMasterKeyPBImpl.java | 133 ++++++ .../impl/pb/RouterMasterKeyRequestPBImpl.java | 128 ++++++ .../pb/RouterMasterKeyResponsePBImpl.java | 128 ++++++ .../utils/FederationStateStoreFacade.java | 58 +++ .../proto/yarn_server_federation_protos.proto | 30 +- .../impl/FederationStateStoreBaseTest.java | 77 ++++ .../impl/HSQLDBFederationStateStore.java | 60 ++- .../store/impl/MySQLFederationStateStore.java | 48 ++ .../impl/SQLServerFederationStateStore.java | 66 +++ .../impl/TestSQLFederationStateStore.java | 16 + .../TestZookeeperFederationStateStore.java | 16 + .../TestFederationProtocolRecords.java | 22 + .../store/sql/FederationSQLAccuracyTest.java | 71 +++ .../TestFederationMySQLScriptAccuracy.java | 58 +++ ...TestFederationSQLServerScriptAccuracy.java | 58 +++ .../utils/TestFederationStateStoreFacade.java | 37 ++ .../AMRMProxyApplicationContext.java | 2 +- .../AMRMProxyApplicationContextImpl.java | 2 +- .../amrmproxy/AbstractRequestInterceptor.java | 2 +- .../amrmproxy/FederationInterceptor.java | 6 +- .../amrmproxy/RequestInterceptor.java | 28 +- .../runtime/DockerLinuxContainerRuntime.java | 70 ++- .../logaggregation/AppLogAggregatorImpl.java | 43 +- 
.../amrmproxy/BaseAMRMProxyTest.java | 8 +- .../PassThroughRequestInterceptor.java | 4 +- .../amrmproxy/TestAMRMProxyMetrics.java | 10 +- .../amrmproxy/TestAMRMProxyService.java | 52 +-- .../amrmproxy/TestFederationInterceptor.java | 36 +- .../TestFederationInterceptorSecure.java | 24 +- .../TestableFederationInterceptor.java | 2 +- .../runtime/TestDockerContainerRuntime.java | 47 +- .../TestLogAggregationService.java | 75 +++- .../FederationStateStoreService.java | 22 + .../yarn/server/router/RouterMetrics.java | 34 +- .../webapp/FederationInterceptorREST.java | 55 ++- .../yarn/server/router/TestRouterMetrics.java | 33 ++ .../MockDefaultRequestInterceptorREST.java | 102 +++++ .../webapp/TestFederationInterceptorREST.java | 71 +++ .../webapp/TestRouterWebServicesREST.java | 74 +-- .../pom.xml | 5 + .../hadoop-yarn-server-web-proxy/pom.xml | 21 +- .../server/webproxy/TestAppReportFetcher.java | 27 +- .../yarn/server/webproxy/TestProxyCA.java | 295 ++++++------ .../server/webproxy/TestProxyUriUtils.java | 59 +-- .../webproxy/TestWebAppProxyServer.java | 28 +- .../webproxy/TestWebAppProxyServlet.java | 233 +++++----- .../webproxy/amfilter/TestAmFilter.java | 64 +-- .../amfilter/TestAmFilterInitializer.java | 20 +- .../webproxy/amfilter/TestSecureAmFilter.java | 32 +- .../src/site/markdown/DockerContainers.md | 20 +- 145 files changed, 4487 insertions(+), 1189 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBZip2Codec.java delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestGraphiteMetrics.java rename hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/{impl => sink}/TestStatsDMetrics.java (91%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java rename hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/{FederationStateStoreStoreProcs.sql => FederationStateStoreStoredProcs.sql} (100%) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.13.1.custom.min.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.13.2.custom.min.js create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationDelegationTokenStateStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKey.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKeyRequest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKeyResponse.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterRMDTSecretManagerState.java create mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/RouterMasterKeyPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/RouterMasterKeyRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/RouterMasterKeyResponsePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/MySQLFederationStateStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLServerFederationStateStore.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/sql/FederationSQLAccuracyTest.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/sql/TestFederationMySQLScriptAccuracy.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/sql/TestFederationSQLServerScriptAccuracy.java diff --git a/LICENSE-binary b/LICENSE-binary index 05b7dcfbe36dd..b09c5e8ab7393 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -305,12 +305,12 @@ net.minidev:json-smart:2.4.7 org.apache.avro:avro:1.9.2 org.apache.commons:commons-collections4:4.2 org.apache.commons:commons-compress:1.21 -org.apache.commons:commons-configuration2:2.1.1 +org.apache.commons:commons-configuration2:2.8.0 org.apache.commons:commons-csv:1.0 org.apache.commons:commons-digester:1.8.1 org.apache.commons:commons-lang3:3.12.0 org.apache.commons:commons-math3:3.6.1 -org.apache.commons:commons-text:1.4 +org.apache.commons:commons-text:1.9 org.apache.commons:commons-validator:1.6 org.apache.curator:curator-client:5.2.0 org.apache.curator:curator-framework:5.2.0 @@ -325,21 +325,21 @@ org.apache.htrace:htrace-core4:4.1.0-incubating org.apache.httpcomponents:httpclient:4.5.6 org.apache.httpcomponents:httpcore:4.4.10 org.apache.kafka:kafka-clients:2.8.1 -org.apache.kerby:kerb-admin:1.0.1 -org.apache.kerby:kerb-client:1.0.1 -org.apache.kerby:kerb-common:1.0.1 -org.apache.kerby:kerb-core:1.0.1 -org.apache.kerby:kerb-crypto:1.0.1 -org.apache.kerby:kerb-identity:1.0.1 -org.apache.kerby:kerb-server:1.0.1 -org.apache.kerby:kerb-simplekdc:1.0.1 -org.apache.kerby:kerb-util:1.0.1 -org.apache.kerby:kerby-asn1:1.0.1 -org.apache.kerby:kerby-config:1.0.1 -org.apache.kerby:kerby-pkix:1.0.1 -org.apache.kerby:kerby-util:1.0.1 -org.apache.kerby:kerby-xdr:1.0.1 -org.apache.kerby:token-provider:1.0.1 +org.apache.kerby:kerb-admin:2.0.2 +org.apache.kerby:kerb-client:2.0.2 +org.apache.kerby:kerb-common:2.0.2 +org.apache.kerby:kerb-core:2.0.2 +org.apache.kerby:kerb-crypto:2.0.2 +org.apache.kerby:kerb-identity:2.0.2 +org.apache.kerby:kerb-server:2.0.2 +org.apache.kerby:kerb-simplekdc:2.0.2 +org.apache.kerby:kerb-util:2.0.2 +org.apache.kerby:kerby-asn1:2.0.2 +org.apache.kerby:kerby-config:2.0.2 +org.apache.kerby:kerby-pkix:2.0.2 +org.apache.kerby:kerby-util:2.0.2 
+org.apache.kerby:kerby-xdr:2.0.2
+org.apache.kerby:token-provider:2.0.2
 org.apache.solr:solr-solrj:8.8.2
 org.apache.yetus:audience-annotations:0.5.0
 org.apache.zookeeper:zookeeper:3.6.3
@@ -523,7 +523,7 @@ junit:junit:4.13.2
 HSQL License
 ------------
 
-org.hsqldb:hsqldb:2.3.4
+org.hsqldb:hsqldb:2.5.2
 
 
 JDOM License
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index f6c9d3c7cb0dd..774e015b37343 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -396,6 +396,10 @@ public Path getLocalPathForWrite(String pathStr, long size,
       Context ctx = confChanged(conf);
       int numDirs = ctx.localDirs.length;
       int numDirsSearched = 0;
+      // Max capacity in any directory
+      long maxCapacity = 0;
+      String errorText = null;
+      IOException diskException = null;
       //remove the leading slash from the path (to make sure that the uri
       //resolution results in a valid path on the dir being checked)
       if (pathStr.startsWith("/")) {
@@ -444,9 +448,18 @@ public Path getLocalPathForWrite(String pathStr, long size,
         int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
         while (numDirsSearched < numDirs) {
           long capacity = ctx.dirDF[dirNum].getAvailable();
+          if (capacity > maxCapacity) {
+            maxCapacity = capacity;
+          }
           if (capacity > size) {
-            returnPath =
-                createPath(ctx.localDirs[dirNum], pathStr, checkWrite);
+            try {
+              returnPath = createPath(ctx.localDirs[dirNum], pathStr,
+                  checkWrite);
+            } catch (IOException e) {
+              errorText = e.getMessage();
+              diskException = e;
+              LOG.debug("DiskException caught for dir {}", ctx.localDirs[dirNum], e);
+            }
             if (returnPath != null) {
               ctx.getAndIncrDirNumLastAccessed(numDirsSearched);
               break;
@@ -462,8 +475,13 @@ public Path getLocalPathForWrite(String pathStr, long size,
       }
 
       //no path found
-      throw new DiskErrorException("Could not find any valid local " +
-          "directory for " + pathStr);
+      String newErrorText = "Could not find any valid local directory for "
+          + pathStr + " with requested size " + size
+          + " as the max capacity in any directory is " + maxCapacity;
+      if (errorText != null) {
+        newErrorText = newErrorText + " due to " + errorText;
+      }
+      throw new DiskErrorException(newErrorText, diskException);
     }
 
   /** Creates a file on the local FS. Pass size as
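The payoff of the LocalDirAllocator change above is the richer DiskErrorException: it now reports the requested size, the largest capacity seen, and chains the underlying disk failure. A hedged sketch of how a caller would surface that message; the "test.local.dirs" key and the paths are invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class DirAllocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative config key listing the candidate local directories.
        conf.set("test.local.dirs", "/tmp/dir0,/tmp/dir1");
        LocalDirAllocator allocator = new LocalDirAllocator("test.local.dirs");
        try {
          // Deliberately request far more space than any directory can offer.
          Path p = allocator.getLocalPathForWrite("p1/x", Long.MAX_VALUE - 1, conf);
          System.out.println("Allocated " + p);
        } catch (DiskErrorException e) {
          // With the change above, the message now reads along the lines of:
          // "Could not find any valid local directory for p1/x with requested
          //  size 9223372036854775806 as the max capacity in any directory is ..."
          System.err.println(e.getMessage());
        }
      }
    }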
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index d9ceab9a054de..2f4f93099b5c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -1326,4 +1326,9 @@ public boolean hasPathCapability(final Path path, final String capability)
       return super.hasPathCapability(path, capability);
     }
   }
+
+  @VisibleForTesting
+  static void setUseDeprecatedFileStatus(boolean useDeprecatedFileStatus) {
+    RawLocalFileSystem.useDeprecatedFileStatus = useDeprecatedFileStatus;
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/ExecutorServiceFuturePool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/ExecutorServiceFuturePool.java
index 9ef50e50d7e5e..645de280394c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/ExecutorServiceFuturePool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/ExecutorServiceFuturePool.java
@@ -22,8 +22,13 @@
 import java.util.Locale;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
+import org.slf4j.Logger;
+
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
 /**
  * A FuturePool implementation backed by a java.util.concurrent.ExecutorService.
  *
@@ -37,7 +42,8 @@
  *
  */
 public class ExecutorServiceFuturePool {
-  private ExecutorService executor;
+
+  private final ExecutorService executor;
 
   public ExecutorServiceFuturePool(ExecutorService executor) {
     this.executor = executor;
@@ -64,6 +70,18 @@ public Future executeRunnable(final Runnable r) {
     return (Future) executor.submit(r::run);
   }
 
+  /**
+   * Utility to shut down the {@link ExecutorService} used by this class.
+   * Will wait up to a certain timeout for the ExecutorService to shut down
+   * gracefully.
+   *
+   * @param logger Logger
+   * @param timeout the maximum time to wait
+   * @param unit the time unit of the timeout argument
+   */
+  public void shutdown(Logger logger, long timeout, TimeUnit unit) {
+    HadoopExecutors.shutdown(executor, logger, timeout, unit);
+  }
+
   public String toString() {
     return String.format(Locale.ROOT,
         "ExecutorServiceFuturePool(executor=%s)", executor);
   }
 }
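The shutdown utility added above is intended for an owning component to drain its prefetch pool when it closes, instead of leaking the executor. A small usage sketch, assuming the pool owns a dedicated executor:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.hadoop.fs.impl.prefetch.ExecutorServiceFuturePool;

    public class FuturePoolShutdownSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(FuturePoolShutdownSketch.class);

      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        ExecutorServiceFuturePool pool = new ExecutorServiceFuturePool(executor);

        Future<?> f = pool.executeRunnable(() -> LOG.info("prefetch task ran"));
        f.get();

        // New in this patch: drain the underlying executor, waiting up to
        // 30 seconds before HadoopExecutors forces termination.
        pool.shutdown(LOG, 30, TimeUnit.SECONDS);
      }
    }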
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 1db8c750cef93..178f761191b1e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -144,7 +144,7 @@ public final class HttpServer2 implements FilterContainer {
   public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
       "hadoop.http.socket.backlog.size";
-  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
+  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 500;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
   public static final String HTTP_ACCEPTOR_COUNT_KEY =
       "hadoop.http.acceptor.count";
@@ -1967,4 +1967,8 @@ HttpServer2Metrics getMetrics() {
     return metrics;
   }
 
+  @VisibleForTesting
+  List getListeners() {
+    return listeners;
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 1564ae9085520..7508def9a75c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -335,6 +335,7 @@ private static class BZip2CompressionInputStream extends
     private boolean isSubHeaderStripped = false;
     private READ_MODE readMode = READ_MODE.CONTINUOUS;
     private long startingPos = 0L;
+    private boolean didInitialRead;
 
     // Following state machine handles different states of compressed stream
     // position
@@ -480,24 +481,42 @@ public void close() throws IOException {
      */
     public int read(byte[] b, int off, int len) throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      }
+      if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      }
+      if (len == 0) {
+        return 0;
+      }
       if (needsReset) {
         internalReset();
       }
-
-      int result = 0;
-      result = this.input.read(b, off, len);
+      // When startingPos > 0, the stream should be initialized at the end of
+      // one block (which would correspond to the start of another block).
+      // Thus, the initial read would technically be reading one byte past a
+      // BZip2 end of block marker. To be consistent, we should also be
+      // updating the position to be one byte after the end of a block on the
+      // initial read.
+      boolean initializedAtEndOfBlock =
+          !didInitialRead && startingPos > 0 && readMode == READ_MODE.BYBLOCK;
+      int result = initializedAtEndOfBlock
+          ? 
BZip2Constants.END_OF_BLOCK + : this.input.read(b, off, len); if (result == BZip2Constants.END_OF_BLOCK) { this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE; } if (this.posSM == POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE) { - result = this.input.read(b, off, off + 1); + result = this.input.read(b, off, 1); // This is the precise time to update compressed stream position // to the client of this code. this.updatePos(true); this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.HOLD; } + didInitialRead = true; return result; } @@ -513,6 +532,7 @@ private void internalReset() throws IOException { needsReset = false; BufferedInputStream bufferedIn = readStreamHeader(); input = new CBZip2InputStream(bufferedIn, this.readMode); + didInitialRead = false; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index f5753efe7e512..17366eb9569f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -3156,6 +3156,7 @@ private void requeueCall(Call call) throws IOException, InterruptedException { try { internalQueueCall(call, false); + rpcMetrics.incrRequeueCalls(); } catch (RpcServerException rse) { call.doResponse(rse.getCause(), rse.getRpcStatusProto()); } @@ -4111,4 +4112,18 @@ public synchronized void run() { } } + @VisibleForTesting + CallQueueManager getCallQueue() { + return callQueue; + } + + @VisibleForTesting + void setCallQueue(CallQueueManager callQueue) { + this.callQueue = callQueue; + } + + @VisibleForTesting + void setRpcRequestClass(Class rpcRequestClass) { + this.rpcRequestClass = rpcRequestClass; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java index bf21e3865fa8a..282eca3cf8373 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java @@ -128,6 +128,8 @@ public static RpcMetrics create(Server server, Configuration conf) { MutableCounterLong rpcClientBackoff; @Metric("Number of Slow RPC calls") MutableCounterLong rpcSlowCalls; + @Metric("Number of requeue calls") + MutableCounterLong rpcRequeueCalls; @Metric("Number of open connections") public int numOpenConnections() { return server.getNumOpenConnections(); @@ -304,6 +306,13 @@ public void incrSlowRpc() { rpcSlowCalls.incr(); } + /** + * Increments the Requeue Calls counter. + */ + public void incrRequeueCalls() { + rpcRequeueCalls.incr(); + } + /** * Returns a MutableRate Counter. * @return Mutable Rate @@ -344,6 +353,15 @@ public long getRpcSlowCalls() { return rpcSlowCalls.value(); } + /** + * Returns the number of requeue calls. 
+ * @return long + */ + @VisibleForTesting + public long getRpcRequeueCalls() { + return rpcRequeueCalls.value(); + } + public MutableRate getDeferredRpcProcessingTime() { return deferredRpcProcessingTime; } @@ -364,4 +382,9 @@ public double getDeferredRpcProcessingStdDev() { public MetricsTag getTag(String tagName) { return registry.getTag(tagName); } + + @VisibleForTesting + public MutableCounterLong getRpcAuthorizationSuccesses() { + return rpcAuthorizationSuccesses; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java index 9ffceaaa0ddda..b11f775a73db3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java @@ -22,12 +22,14 @@ import static org.apache.hadoop.util.Preconditions.*; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsTag; import static org.apache.hadoop.metrics2.util.Contracts.*; -class MetricsRecordImpl extends AbstractMetricsRecord { +@VisibleForTesting +public class MetricsRecordImpl extends AbstractMetricsRecord { protected static final String DEFAULT_CONTEXT = "default"; private final long timestamp; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java index ea1bde3a75e03..e07260c99936f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java @@ -21,6 +21,7 @@ import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecord; @@ -37,171 +38,173 @@ import java.nio.charset.StandardCharsets; /** - * A metrics sink that writes to a Graphite server + * A metrics sink that writes to a Graphite server. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class GraphiteSink implements MetricsSink, Closeable { - private static final Logger LOG = - LoggerFactory.getLogger(GraphiteSink.class); - private static final String SERVER_HOST_KEY = "server_host"; - private static final String SERVER_PORT_KEY = "server_port"; - private static final String METRICS_PREFIX = "metrics_prefix"; - private String metricsPrefix = null; - private Graphite graphite = null; - - @Override - public void init(SubsetConfiguration conf) { - // Get Graphite host configurations. - final String serverHost = conf.getString(SERVER_HOST_KEY); - final int serverPort = Integer.parseInt(conf.getString(SERVER_PORT_KEY)); - - // Get Graphite metrics graph prefix. 
- metricsPrefix = conf.getString(METRICS_PREFIX); - if (metricsPrefix == null) - metricsPrefix = ""; - - graphite = new Graphite(serverHost, serverPort); - graphite.connect(); + private static final Logger LOG = + LoggerFactory.getLogger(GraphiteSink.class); + private static final String SERVER_HOST_KEY = "server_host"; + private static final String SERVER_PORT_KEY = "server_port"; + private static final String METRICS_PREFIX = "metrics_prefix"; + private String metricsPrefix = null; + private Graphite graphite = null; + + @Override + public void init(SubsetConfiguration conf) { + // Get Graphite host configurations. + final String serverHost = conf.getString(SERVER_HOST_KEY); + final int serverPort = Integer.parseInt(conf.getString(SERVER_PORT_KEY)); + + // Get Graphite metrics graph prefix. + metricsPrefix = conf.getString(METRICS_PREFIX); + if (metricsPrefix == null) { + metricsPrefix = ""; } - @Override - public void putMetrics(MetricsRecord record) { - StringBuilder lines = new StringBuilder(); - StringBuilder metricsPathPrefix = new StringBuilder(); - - // Configure the hierarchical place to display the graph. - metricsPathPrefix.append(metricsPrefix).append(".") - .append(record.context()).append(".").append(record.name()); - - for (MetricsTag tag : record.tags()) { - if (tag.value() != null) { - metricsPathPrefix.append(".") - .append(tag.name()) - .append("=") - .append(tag.value()); - } - } - - // The record timestamp is in milliseconds while Graphite expects an epoc time in seconds. - long timestamp = record.timestamp() / 1000L; + graphite = new Graphite(serverHost, serverPort); + graphite.connect(); + } + + @Override + public void putMetrics(MetricsRecord record) { + StringBuilder lines = new StringBuilder(); + StringBuilder metricsPathPrefix = new StringBuilder(); + + // Configure the hierarchical place to display the graph. + metricsPathPrefix.append(metricsPrefix).append(".") + .append(record.context()).append(".").append(record.name()); + + for (MetricsTag tag : record.tags()) { + if (tag.value() != null) { + metricsPathPrefix.append(".") + .append(tag.name()) + .append("=") + .append(tag.value()); + } + } - // Collect datapoints. - for (AbstractMetric metric : record.metrics()) { - lines.append( - metricsPathPrefix.toString() + "." - + metric.name().replace(' ', '.')).append(" ") - .append(metric.value()).append(" ").append(timestamp) - .append("\n"); - } + // The record timestamp is in milliseconds while Graphite expects an epoc time in seconds. + long timestamp = record.timestamp() / 1000L; - try { - graphite.write(lines.toString()); - } catch (Exception e) { - LOG.warn("Error sending metrics to Graphite", e); - try { - graphite.close(); - } catch (Exception e1) { - throw new MetricsException("Error closing connection to Graphite", e1); - } - } + // Collect datapoints. + for (AbstractMetric metric : record.metrics()) { + lines.append(metricsPathPrefix + "." 
+ metric.name().replace(' ', '.')).append(" ") + .append(metric.value()).append(" ").append(timestamp) + .append("\n"); } - @Override - public void flush() { + try { + graphite.write(lines.toString()); + } catch (Exception e) { + LOG.warn("Error sending metrics to Graphite.", e); try { - graphite.flush(); - } catch (Exception e) { - LOG.warn("Error flushing metrics to Graphite", e); - try { - graphite.close(); - } catch (Exception e1) { - throw new MetricsException("Error closing connection to Graphite", e1); - } + graphite.close(); + } catch (Exception e1) { + throw new MetricsException("Error closing connection to Graphite", e1); } } - - @Override - public void close() throws IOException { - graphite.close(); + } + + @Override + public void flush() { + try { + graphite.flush(); + } catch (Exception e) { + LOG.warn("Error flushing metrics to Graphite.", e); + try { + graphite.close(); + } catch (Exception e1) { + throw new MetricsException("Error closing connection to Graphite.", e1); + } } + } - public static class Graphite { - private final static int MAX_CONNECTION_FAILURES = 5; + @Override + public void close() throws IOException { + graphite.close(); + } - private String serverHost; - private int serverPort; - private Writer writer = null; - private Socket socket = null; - private int connectionFailures = 0; + public static class Graphite { + private final static int MAX_CONNECTION_FAILURES = 5; - public Graphite(String serverHost, int serverPort) { - this.serverHost = serverHost; - this.serverPort = serverPort; - } + private String serverHost; + private int serverPort; + private Writer writer = null; + private Socket socket = null; + private int connectionFailures = 0; - public void connect() { - if (isConnected()) { - throw new MetricsException("Already connected to Graphite"); - } - if (tooManyConnectionFailures()) { - // return silently (there was ERROR in logs when we reached limit for the first time) - return; - } - try { + public Graphite(String serverHost, int serverPort) { + this.serverHost = serverHost; + this.serverPort = serverPort; + } + + public void connect() { + if (isConnected()) { + throw new MetricsException("Already connected to Graphite"); + } + if (tooManyConnectionFailures()) { + // return silently (there was ERROR in logs when we reached limit for the first time) + return; + } + try { // Open a connection to Graphite server. 
- socket = new Socket(serverHost, serverPort); + socket = new Socket(serverHost, serverPort); writer = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8); - } catch (Exception e) { - connectionFailures++; - if (tooManyConnectionFailures()) { - // first time when connection limit reached, report to logs - LOG.error("Too many connection failures, would not try to connect again."); - } - throw new MetricsException("Error creating connection, " - + serverHost + ":" + serverPort, e); + } catch (Exception e) { + connectionFailures++; + if (tooManyConnectionFailures()) { + // first time when connection limit reached, report to logs + LOG.error("Too many connection failures, would not try to connect again."); } + throw new MetricsException("Error creating connection, " + + serverHost + ":" + serverPort, e); } + } - public void write(String msg) throws IOException { - if (!isConnected()) { - connect(); - } - if (isConnected()) { - writer.write(msg); - } + public void write(String msg) throws IOException { + if (!isConnected()) { + connect(); } - - public void flush() throws IOException { - if (isConnected()) { - writer.flush(); - } + if (isConnected()) { + writer.write(msg); } + } - public boolean isConnected() { - return socket != null && socket.isConnected() && !socket.isClosed(); + public void flush() throws IOException { + if (isConnected()) { + writer.flush(); } + } - public void close() throws IOException { - try { - if (writer != null) { - writer.close(); - } - } catch (IOException ex) { - if (socket != null) { - socket.close(); - } - } finally { - socket = null; - writer = null; - } - } + public boolean isConnected() { + return socket != null && socket.isConnected() && !socket.isClosed(); + } - private boolean tooManyConnectionFailures() { - return connectionFailures > MAX_CONNECTION_FAILURES; + public void close() throws IOException { + try { + if (writer != null) { + writer.close(); + } + } catch (IOException ex) { + if (socket != null) { + socket.close(); + } + } finally { + socket = null; + writer = null; } + } + private boolean tooManyConnectionFailures() { + return connectionFailures > MAX_CONNECTION_FAILURES; } + } + @VisibleForTesting + void setGraphite(Graphite graphite) { + this.graphite = graphite; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java index d1ec47fdecb31..4f41c0b0057ce 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java @@ -28,6 +28,7 @@ import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricType; import org.apache.hadoop.metrics2.MetricsException; @@ -214,5 +215,8 @@ public void close() throws IOException { } } - + @VisibleForTesting + void setStatsd(StatsD statsd) { + this.statsd = statsd; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index 909f1afbffa4f..fb9a2951f598a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -59,6 +59,7 @@ import org.apache.zookeeper.client.ZKClientConfig; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; +import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -265,7 +266,11 @@ public void startThreads() throws IOException { // So, let's explicitly create them. CuratorFramework nullNsFw = zkClient.usingNamespace(null); try { - nullNsFw.create().creatingParentContainersIfNeeded().forPath("/" + zkClient.getNamespace()); + String nameSpace = "/" + zkClient.getNamespace(); + Stat stat = nullNsFw.checkExists().forPath(nameSpace); + if (stat == null) { + nullNsFw.create().creatingParentContainersIfNeeded().forPath(nameSpace); + } } catch (Exception e) { throw new IOException("Could not create namespace", e); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java index 0bbceb59c31e7..6e2838bfe9c97 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java @@ -115,9 +115,8 @@ public static void shutdown(ExecutorService executorService, Logger logger, try { executorService.shutdown(); - logger.debug( - "Gracefully shutting down executor service. Waiting max {} {}", - timeout, unit); + logger.debug("Gracefully shutting down executor service {}. Waiting max {} {}", + executorService, timeout, unit); if (!executorService.awaitTermination(timeout, unit)) { logger.debug( "Executor service has not shutdown yet. Forcing. " diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java index acda898ea1342..939881f39df6d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java @@ -26,6 +26,7 @@ import java.util.NoSuchElementException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell; @@ -532,4 +533,20 @@ public void testGetLocalPathForWriteForInvalidPaths() throws Exception { } } + /** + * Test to verify LocalDirAllocator log details to provide diagnostics when file creation fails. 
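+   * The exception message is expected to include the requested size and the
+   * max capacity seen across the configured directories, matching the text
+   * built in LocalDirAllocator#getLocalPathForWrite above.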
+ * + * @throws Exception + */ + @Test(timeout = 30000) + public void testGetLocalPathForWriteForLessSpace() throws Exception { + String dir0 = buildBufferDir(ROOT, 0); + String dir1 = buildBufferDir(ROOT, 1); + conf.set(CONTEXT, dir0 + "," + dir1); + LambdaTestUtils.intercept(DiskErrorException.class, + String.format("Could not find any valid local directory for %s with requested size %s", + "p1/x", Long.MAX_VALUE - 1), "Expect a DiskErrorException.", + () -> dirAllocator.getLocalPathForWrite("p1/x", Long.MAX_VALUE - 1, conf)); + } } + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 29ef6ca6c7afd..38e16221a4518 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -24,7 +24,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; @@ -650,7 +649,8 @@ public void testFileStatusPipeFile() throws Exception { RawLocalFileSystem fs = spy(origFs); Configuration conf = mock(Configuration.class); fs.setConf(conf); - Whitebox.setInternalState(fs, "useDeprecatedFileStatus", false); + + RawLocalFileSystem.setUseDeprecatedFileStatus(false); Path path = new Path("/foo"); File pipe = mock(File.class); when(pipe.isFile()).thenReturn(false); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index b1255d19d9086..37e3415546e2e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -29,7 +29,6 @@ import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.test.Whitebox; import org.assertj.core.api.Assertions; import org.eclipse.jetty.server.ServerConnector; @@ -663,8 +662,7 @@ private HttpServer2 checkBindAddress(String host, int port, boolean findPort) HttpServer2 server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) - List listeners = (List) Whitebox.getInternalState(server, - "listeners"); + List listeners = server.getListeners(); ServerConnector listener = (ServerConnector)listeners.get(0); assertEquals(port, listener.getPort()); @@ -740,12 +738,21 @@ public void testBacklogSize() throws Exception Configuration conf = new Configuration(); conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize); HttpServer2 srv = createServer("test", conf); - List listeners = (List) Whitebox.getInternalState(srv, - "listeners"); + List listeners = srv.getListeners(); ServerConnector listener = (ServerConnector)listeners.get(0); assertEquals(backlogSize, listener.getAcceptQueueSize()); } + @Test + public void testBacklogSize2() throws Exception + { + Configuration conf = new Configuration(); + HttpServer2 srv = createServer("test", conf); + 
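+    // No explicit backlog size is configured here, so the listener should
+    // fall back to the new HTTP_SOCKET_BACKLOG_SIZE_DEFAULT of 500
+    // introduced earlier in this patch.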
List listeners = srv.getListeners(); + ServerConnector listener = (ServerConnector)listeners.get(0); + assertEquals(500, listener.getAcceptQueueSize()); + } + @Test public void testIdleTimeout() throws Exception { final int idleTimeout = 1000; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBZip2Codec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBZip2Codec.java new file mode 100644 index 0000000000000..9dd3215f90d5e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBZip2Codec.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.io.compress; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE; +import org.apache.hadoop.io.compress.bzip2.BZip2TextFileWriter; +import org.apache.hadoop.io.compress.bzip2.BZip2Utils; + +import static org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE.BYBLOCK; +import static org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE.CONTINUOUS; +import static org.apache.hadoop.io.compress.bzip2.BZip2TextFileWriter.BLOCK_SIZE; +import static org.apache.hadoop.util.Preconditions.checkArgument; +import static org.assertj.core.api.Assertions.assertThatNullPointerException; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +public final class TestBZip2Codec { + + private static final long HEADER_LEN = 2; + + private Configuration conf; + private FileSystem fs; + private BZip2Codec codec; + private Decompressor decompressor; + private Path tempFile; + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + + Path workDir = new Path(System.getProperty("test.build.data", "target"), + "data/" + getClass().getSimpleName()); + + Path inputDir = new Path(workDir, "input"); + tempFile = new Path(inputDir, "test.txt.bz2"); + + fs = workDir.getFileSystem(conf); + + codec = new BZip2Codec(); + codec.setConf(new Configuration(/* loadDefaults */ false)); + decompressor = CodecPool.getDecompressor(codec); + } + + @After + public void tearDown() throws Exception { + 
CodecPool.returnDecompressor(decompressor); + fs.delete(tempFile, /* recursive */ false); + } + + @Test + public void createInputStreamWithStartAndEnd() throws Exception { + byte[] data1 = newAlternatingByteArray(BLOCK_SIZE, 'a', 'b'); + byte[] data2 = newAlternatingByteArray(BLOCK_SIZE, 'c', 'd'); + byte[] data3 = newAlternatingByteArray(BLOCK_SIZE, 'e', 'f'); + + try (BZip2TextFileWriter writer = new BZip2TextFileWriter(tempFile, conf)) { + writer.write(data1); + writer.write(data2); + writer.write(data3); + } + long fileSize = fs.getFileStatus(tempFile).getLen(); + + List nextBlockOffsets = BZip2Utils.getNextBlockMarkerOffsets(tempFile, conf); + long block2Start = nextBlockOffsets.get(0); + long block3Start = nextBlockOffsets.get(1); + + try (SplitCompressionInputStream stream = newCompressionStream(tempFile, 0, fileSize, + BYBLOCK)) { + assertEquals(0, stream.getPos()); + assertCasesWhereReadDoesNotAdvanceStream(stream); + assertReadingAtPositionZero(stream, data1); + assertCasesWhereReadDoesNotAdvanceStream(stream); + assertReadingPastEndOfBlock(stream, block2Start, data2); + assertReadingPastEndOfBlock(stream, block3Start, data3); + assertEquals(-1, stream.read()); + } + + try (SplitCompressionInputStream stream = newCompressionStream(tempFile, 1, fileSize - 1, + BYBLOCK)) { + assertEquals(block2Start, stream.getPos()); + assertCasesWhereReadDoesNotAdvanceStream(stream); + assertReadingPastEndOfBlock(stream, block2Start, data2); + assertCasesWhereReadDoesNotAdvanceStream(stream); + assertReadingPastEndOfBlock(stream, block3Start, data3); + assertEquals(-1, stream.read()); + } + + // With continuous mode, only starting at or after the stream header is + // supported. + byte[] allData = Bytes.concat(data1, data2, data3); + assertReadingWithContinuousMode(tempFile, 0, fileSize, allData); + assertReadingWithContinuousMode(tempFile, HEADER_LEN, fileSize - HEADER_LEN, allData); + } + + private void assertReadingWithContinuousMode(Path file, long start, long length, + byte[] expectedData) throws IOException { + try (SplitCompressionInputStream stream = newCompressionStream(file, start, length, + CONTINUOUS)) { + assertEquals(HEADER_LEN, stream.getPos()); + + assertRead(stream, expectedData); + assertEquals(-1, stream.read()); + + // When specifying CONTINUOUS read mode, the position ends up not being + // updated at all. + assertEquals(HEADER_LEN, stream.getPos()); + } + } + + private SplitCompressionInputStream newCompressionStream(Path file, long start, long length, + READ_MODE readMode) throws IOException { + FSDataInputStream rawIn = fs.open(file); + rawIn.seek(start); + long end = start + length; + return codec.createInputStream(rawIn, decompressor, start, end, readMode); + } + + private static byte[] newAlternatingByteArray(int size, int... 
choices) { + checkArgument(choices.length > 1); + byte[] result = new byte[size]; + for (int i = 0; i < size; i++) { + result[i] = (byte) choices[i % choices.length]; + } + return result; + } + + private static void assertCasesWhereReadDoesNotAdvanceStream(SplitCompressionInputStream in) + throws IOException { + long initialPos = in.getPos(); + + assertEquals(0, in.read(new byte[0])); + + assertThatNullPointerException().isThrownBy(() -> in.read(null, 0, 1)); + assertThatExceptionOfType(IndexOutOfBoundsException.class).isThrownBy( + () -> in.read(new byte[5], -1, 2)); + assertThatExceptionOfType(IndexOutOfBoundsException.class).isThrownBy( + () -> in.read(new byte[5], 0, -1)); + assertThatExceptionOfType(IndexOutOfBoundsException.class).isThrownBy( + () -> in.read(new byte[5], 1, 5)); + + assertEquals(initialPos, in.getPos()); + } + + private static void assertReadingAtPositionZero(SplitCompressionInputStream in, + byte[] expectedData) throws IOException { + byte[] buffer = new byte[expectedData.length]; + assertEquals(1, in.read(buffer, 0, 1)); + assertEquals(expectedData[0], buffer[0]); + assertEquals(0, in.getPos()); + + IOUtils.readFully(in, buffer, 1, expectedData.length - 1); + assertArrayEquals(expectedData, buffer); + assertEquals(0, in.getPos()); + } + + private static void assertReadingPastEndOfBlock(SplitCompressionInputStream in, + long endOfBlockPos, byte[] expectedData) throws IOException { + byte[] buffer = new byte[expectedData.length]; + assertEquals(1, in.read(buffer)); + assertEquals(expectedData[0], buffer[0]); + assertEquals(endOfBlockPos + 1, in.getPos()); + + IOUtils.readFully(in, buffer, 1, expectedData.length - 1); + assertArrayEquals(expectedData, buffer); + assertEquals(endOfBlockPos + 1, in.getPos()); + } + + private static void assertRead(InputStream in, byte[] expectedData) throws IOException { + byte[] buffer = new byte[expectedData.length]; + IOUtils.readFully(in, buffer); + assertArrayEquals(expectedData, buffer); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index 1e780793a6d24..ffa17224b03bf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -92,7 +92,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.StringUtils; import org.assertj.core.api.Condition; import org.junit.Assert; @@ -938,7 +937,6 @@ public void testIpcWithReaderQueuing() throws Exception { // goal is to jam a handler with a connection, fill the callq with // connections, in turn jamming the readers - then flood the server and // ensure that the listener blocks when the reader connection queues fill - @SuppressWarnings("unchecked") private void checkBlocking(int readers, int readerQ, int callQ) throws Exception { int handlers = 1; // makes it easier @@ -958,9 +956,8 @@ private void checkBlocking(int readers, int readerQ, int callQ) throws Exception // start server final TestServerQueue server = new TestServerQueue(clients, readers, callQ, handlers, conf); - CallQueueManager spy = spy( - (CallQueueManager)Whitebox.getInternalState(server, "callQueue")); - Whitebox.setInternalState(server, 
"callQueue", spy); + CallQueueManager spy = spy(server.getCallQueue()); + server.setCallQueue(spy); final InetSocketAddress addr = NetUtils.getConnectAddress(server); server.start(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 7201b28ebab00..101750d72c86d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -52,7 +52,6 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.test.MockitoUtil; -import org.apache.hadoop.test.Whitebox; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -309,7 +308,7 @@ public ProtocolProxy getProxy( throws IOException { T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(), new Class[] { protocol }, new StoppedInvocationHandler()); - return new ProtocolProxy(protocol, proxy, false); + return new ProtocolProxy<>(protocol, proxy, false); } @Override @@ -1219,10 +1218,8 @@ public void testClientBackOff() throws Exception { .setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true); server = setupTestServer(builder); - @SuppressWarnings("unchecked") - CallQueueManager spy = spy((CallQueueManager) Whitebox - .getInternalState(server, "callQueue")); - Whitebox.setInternalState(server, "callQueue", spy); + CallQueueManager spy = spy(server.getCallQueue()); + server.setCallQueue(spy); Exception lastException = null; proxy = getClient(addr, conf); @@ -1274,7 +1271,7 @@ public void testClientBackOffByResponseTime() throws Exception { GenericTestUtils.setLogLevel(DecayRpcScheduler.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RPC.LOG, Level.DEBUG); - final List> res = new ArrayList>(); + final List> res = new ArrayList<>(); final ExecutorService executorService = Executors.newFixedThreadPool(numClients); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); @@ -1282,10 +1279,8 @@ public void testClientBackOffByResponseTime() throws Exception { final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0"; Server server = setupDecayRpcSchedulerandTestServer(ns + "."); - @SuppressWarnings("unchecked") - CallQueueManager spy = spy((CallQueueManager) Whitebox - .getInternalState(server, "callQueue")); - Whitebox.setInternalState(server, "callQueue", spy); + CallQueueManager spy = spy(server.getCallQueue()); + server.setCallQueue(spy); Exception lastException = null; proxy = getClient(addr, conf); @@ -1624,11 +1619,8 @@ public RpcStatusProto getRpcStatusProto() { RPC.Builder builder = newServerBuilder(conf) .setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true); server = setupTestServer(builder); - Whitebox.setInternalState( - server, "rpcRequestClass", FakeRequestClass.class); - MutableCounterLong authMetric = - (MutableCounterLong)Whitebox.getInternalState( - server.getRpcMetrics(), "rpcAuthorizationSuccesses"); + server.setRpcRequestClass(FakeRequestClass.class); + MutableCounterLong authMetric = server.getRpcMetrics().getRpcAuthorizationSuccesses(); proxy = getClient(addr, conf); boolean isDisconnected = true; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java deleted file mode 100644 
index 743080acd7a5e..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.metrics2.impl; - -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsRecord; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.sink.GraphiteSink; -import org.apache.hadoop.test.Whitebox; -import org.junit.Test; -import org.mockito.ArgumentCaptor; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.Collections; - -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.reset; - - -public class TestGraphiteMetrics { - private AbstractMetric makeMetric(String name, Number value) { - AbstractMetric metric = mock(AbstractMetric.class); - when(metric.name()).thenReturn(name); - when(metric.value()).thenReturn(value); - return metric; - } - - private GraphiteSink.Graphite makeGraphite() { - GraphiteSink.Graphite mockGraphite = mock(GraphiteSink.Graphite.class); - when(mockGraphite.isConnected()).thenReturn(true); - return mockGraphite; - } - - @Test - public void testPutMetrics() { - GraphiteSink sink = new GraphiteSink(); - List tags = new ArrayList(); - tags.add(new MetricsTag(MsInfo.Context, "all")); - tags.add(new MetricsTag(MsInfo.Hostname, "host")); - Set metrics = new HashSet(); - metrics.add(makeMetric("foo1", 1.25)); - metrics.add(makeMetric("foo2", 2.25)); - MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics); - - ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); - final GraphiteSink.Graphite mockGraphite = makeGraphite(); - Whitebox.setInternalState(sink, "graphite", mockGraphite); - sink.putMetrics(record); - - try { - verify(mockGraphite).write(argument.capture()); - } catch (IOException e) { - e.printStackTrace(); - } - - String result = argument.getValue(); - - assertEquals(true, - result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" + - "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") || - result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + - "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n")); - } - - @Test - public void testPutMetrics2() { - GraphiteSink sink = new GraphiteSink(); - List tags = 
new ArrayList(); - tags.add(new MetricsTag(MsInfo.Context, "all")); - tags.add(new MetricsTag(MsInfo.Hostname, null)); - Set metrics = new HashSet(); - metrics.add(makeMetric("foo1", 1)); - metrics.add(makeMetric("foo2", 2)); - MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics); - - - ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); - final GraphiteSink.Graphite mockGraphite = makeGraphite(); - Whitebox.setInternalState(sink, "graphite", mockGraphite); - sink.putMetrics(record); - - try { - verify(mockGraphite).write(argument.capture()); - } catch (IOException e) { - e.printStackTrace(); - } - - String result = argument.getValue(); - - assertEquals(true, - result.equals("null.all.Context.Context=all.foo1 1 10\n" + - "null.all.Context.Context=all.foo2 2 10\n") || - result.equals("null.all.Context.Context=all.foo2 2 10\n" + - "null.all.Context.Context=all.foo1 1 10\n")); - } - - /** - * Assert that timestamps are converted correctly, ticket HADOOP-11182 - */ - @Test - public void testPutMetrics3() { - - // setup GraphiteSink - GraphiteSink sink = new GraphiteSink(); - final GraphiteSink.Graphite mockGraphite = makeGraphite(); - Whitebox.setInternalState(sink, "graphite", mockGraphite); - - // given two metrics records with timestamps 1000 milliseconds apart. - List tags = Collections.emptyList(); - Set metrics = new HashSet(); - metrics.add(makeMetric("foo1", 1)); - MetricsRecord record1 = new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics); - MetricsRecord record2 = new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics); - - sink.putMetrics(record1); - sink.putMetrics(record2); - - sink.flush(); - try { - sink.close(); - } catch(IOException e) { - e.printStackTrace(); - } - - // then the timestamps in the graphite stream should differ by one second. 
- try { - verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000000\n")); - verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000001\n")); - } catch (IOException e) { - e.printStackTrace(); - } - } - - @Test - public void testFailureAndPutMetrics() throws IOException { - GraphiteSink sink = new GraphiteSink(); - List tags = new ArrayList(); - tags.add(new MetricsTag(MsInfo.Context, "all")); - tags.add(new MetricsTag(MsInfo.Hostname, "host")); - Set metrics = new HashSet(); - metrics.add(makeMetric("foo1", 1.25)); - metrics.add(makeMetric("foo2", 2.25)); - MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics); - - final GraphiteSink.Graphite mockGraphite = makeGraphite(); - Whitebox.setInternalState(sink, "graphite", mockGraphite); - - // throw exception when first try - doThrow(new IOException("IO exception")).when(mockGraphite).write(anyString()); - - sink.putMetrics(record); - verify(mockGraphite).write(anyString()); - verify(mockGraphite).close(); - - // reset mock and try again - reset(mockGraphite); - when(mockGraphite.isConnected()).thenReturn(false); - - ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); - sink.putMetrics(record); - - verify(mockGraphite).write(argument.capture()); - String result = argument.getValue(); - - assertEquals(true, - result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" + - "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") || - result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + - "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n")); - } - - @Test - public void testClose(){ - GraphiteSink sink = new GraphiteSink(); - final GraphiteSink.Graphite mockGraphite = makeGraphite(); - Whitebox.setInternalState(sink, "graphite", mockGraphite); - try { - sink.close(); - } catch (IOException ioe) { - ioe.printStackTrace(); - } - - try { - verify(mockGraphite).close(); - } catch (IOException ioe) { - ioe.printStackTrace(); - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestGraphiteMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestGraphiteMetrics.java new file mode 100644 index 0000000000000..9ea81c6e4c62e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestGraphiteMetrics.java @@ -0,0 +1,219 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.metrics2.sink; + +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsTag; +import org.apache.hadoop.metrics2.impl.MetricsRecordImpl; +import org.apache.hadoop.metrics2.impl.MsInfo; +import org.junit.Test; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.Collections; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.reset; + + +public class TestGraphiteMetrics { + private AbstractMetric makeMetric(String name, Number value) { + AbstractMetric metric = mock(AbstractMetric.class); + when(metric.name()).thenReturn(name); + when(metric.value()).thenReturn(value); + return metric; + } + + private GraphiteSink.Graphite makeGraphite() { + GraphiteSink.Graphite mockGraphite = mock(GraphiteSink.Graphite.class); + when(mockGraphite.isConnected()).thenReturn(true); + return mockGraphite; + } + + @Test + public void testPutMetrics() { + GraphiteSink sink = new GraphiteSink(); + List tags = new ArrayList<>(); + tags.add(new MetricsTag(MsInfo.Context, "all")); + tags.add(new MetricsTag(MsInfo.Hostname, "host")); + Set metrics = new HashSet<>(); + metrics.add(makeMetric("foo1", 1.25)); + metrics.add(makeMetric("foo2", 2.25)); + MetricsRecord record = + new MetricsRecordImpl(MsInfo.Context, 10000, tags, metrics); + + ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); + final GraphiteSink.Graphite mockGraphite = makeGraphite(); + sink.setGraphite(mockGraphite); + sink.putMetrics(record); + + try { + verify(mockGraphite).write(argument.capture()); + } catch (IOException e) { + e.printStackTrace(); + } + + String result = argument.getValue(); + + assertEquals(true, + result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" + + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") || + result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n")); + } + + @Test + public void testPutMetrics2() throws IllegalAccessException { + GraphiteSink sink = new GraphiteSink(); + List tags = new ArrayList<>(); + tags.add(new MetricsTag(MsInfo.Context, "all")); + tags.add(new MetricsTag(MsInfo.Hostname, null)); + Set metrics = new HashSet<>(); + metrics.add(makeMetric("foo1", 1)); + metrics.add(makeMetric("foo2", 2)); + MetricsRecord record = + new MetricsRecordImpl(MsInfo.Context, 10000, tags, metrics); + + ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); + final GraphiteSink.Graphite mockGraphite = makeGraphite(); + sink.setGraphite(mockGraphite); + sink.putMetrics(record); + + try { + verify(mockGraphite).write(argument.capture()); + } catch (IOException e) { + e.printStackTrace(); + } + + String result = argument.getValue(); + + assertEquals(true, + result.equals("null.all.Context.Context=all.foo1 1 10\n" + + "null.all.Context.Context=all.foo2 2 10\n") || + result.equals("null.all.Context.Context=all.foo2 2 10\n" + + "null.all.Context.Context=all.foo1 1 10\n")); + } + + /** + * Assert that timestamps are converted correctly, ticket 
HADOOP-11182. + */ + @Test + public void testPutMetrics3() throws IllegalAccessException { + + // setup GraphiteSink + GraphiteSink sink = new GraphiteSink(); + final GraphiteSink.Graphite mockGraphite = makeGraphite(); + sink.setGraphite(mockGraphite); + + // given two metrics records with timestamps 1000 milliseconds apart. + List tags = Collections.emptyList(); + Set metrics = new HashSet<>(); + metrics.add(makeMetric("foo1", 1)); + MetricsRecord record1 = + new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics); + MetricsRecord record2 = + new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics); + + sink.putMetrics(record1); + sink.putMetrics(record2); + + sink.flush(); + try { + sink.close(); + } catch(IOException e) { + e.printStackTrace(); + } + + // then the timestamps in the graphite stream should differ by one second. + try { + verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000000\n")); + verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000001\n")); + } catch (IOException e) { + e.printStackTrace(); + } + } + + @Test + public void testFailureAndPutMetrics() throws IOException, IllegalAccessException { + GraphiteSink sink = new GraphiteSink(); + List tags = new ArrayList<>(); + tags.add(new MetricsTag(MsInfo.Context, "all")); + tags.add(new MetricsTag(MsInfo.Hostname, "host")); + Set metrics = new HashSet<>(); + metrics.add(makeMetric("foo1", 1.25)); + metrics.add(makeMetric("foo2", 2.25)); + MetricsRecord record = + new MetricsRecordImpl(MsInfo.Context, 10000, tags, metrics); + + final GraphiteSink.Graphite mockGraphite = makeGraphite(); + sink.setGraphite(mockGraphite); + + // throw exception when first try + doThrow(new IOException("IO exception")).when(mockGraphite).write(anyString()); + + sink.putMetrics(record); + verify(mockGraphite).write(anyString()); + verify(mockGraphite).close(); + + // reset mock and try again + reset(mockGraphite); + when(mockGraphite.isConnected()).thenReturn(false); + + ArgumentCaptor argument = ArgumentCaptor.forClass(String.class); + sink.putMetrics(record); + + verify(mockGraphite).write(argument.capture()); + String result = argument.getValue(); + + assertEquals(true, + result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" + + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") || + result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n")); + } + + @Test + public void testClose() throws IllegalAccessException { + GraphiteSink sink = new GraphiteSink(); + final GraphiteSink.Graphite mockGraphite = makeGraphite(); + sink.setGraphite(mockGraphite); + try { + sink.close(); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + + try { + verify(mockGraphite).close(); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestStatsDMetrics.java similarity index 91% rename from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestStatsDMetrics.java index 4cf4894ff8352..99a75787ad841 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestStatsDMetrics.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.metrics2.impl; +package org.apache.hadoop.metrics2.sink; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; @@ -35,9 +35,9 @@ import org.apache.hadoop.metrics2.MetricType; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.sink.StatsDSink; +import org.apache.hadoop.metrics2.impl.MetricsRecordImpl; +import org.apache.hadoop.metrics2.impl.MsInfo; import org.apache.hadoop.metrics2.sink.StatsDSink.StatsD; -import org.apache.hadoop.test.Whitebox; import org.junit.Test; public class TestStatsDMetrics { @@ -52,7 +52,7 @@ private AbstractMetric makeMetric(String name, Number value, } @Test(timeout=3000) - public void testPutMetrics() throws IOException, InterruptedException { + public void testPutMetrics() throws IOException, IllegalAccessException { final StatsDSink sink = new StatsDSink(); List tags = new ArrayList(); tags.add(new MetricsTag(MsInfo.Hostname, "host")); @@ -69,7 +69,7 @@ public void testPutMetrics() throws IOException, InterruptedException { final StatsDSink.StatsD mockStatsD = new StatsD(sock.getLocalAddress().getHostName(), sock.getLocalPort()); - Whitebox.setInternalState(sink, "statsd", mockStatsD); + sink.setStatsd(mockStatsD); final DatagramPacket p = new DatagramPacket(new byte[8192], 8192); sink.putMetrics(record); sock.receive(p); @@ -87,7 +87,7 @@ public void testPutMetrics() throws IOException, InterruptedException { } @Test(timeout=3000) - public void testPutMetrics2() throws IOException { + public void testPutMetrics2() throws IOException, IllegalAccessException { StatsDSink sink = new StatsDSink(); List tags = new ArrayList(); tags.add(new MetricsTag(MsInfo.Hostname, null)); @@ -104,7 +104,7 @@ public void testPutMetrics2() throws IOException { final StatsDSink.StatsD mockStatsD = new StatsD(sock.getLocalAddress().getHostName(), sock.getLocalPort()); - Whitebox.setInternalState(sink, "statsd", mockStatsD); + sink.setStatsd(mockStatsD); final DatagramPacket p = new DatagramPacket(new byte[8192], 8192); sink.putMetrics(record); sock.receive(p); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java index 2504a6401a8d9..d33545ab6fe0d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java @@ -25,10 +25,12 @@ import javax.naming.CommunicationException; import javax.naming.NameNotFoundException; +import javax.naming.ServiceUnavailableException; import org.apache.hadoop.util.Time; import org.assertj.core.api.Assertions; +import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -167,7 +169,7 @@ public void testRDNS() throws Exception { try { String s = DNS.reverseDns(localhost, null); LOG.info("Local reverse DNS hostname is " + s); - } catch (NameNotFoundException | CommunicationException e) { + } catch (NameNotFoundException | CommunicationException | ServiceUnavailableException e) { if (!localhost.isLinkLocalAddress() || localhost.isLoopbackAddress()) { //these addresses probably won't work with rDNS anyway, unless someone //has unusual entries in their DNS server 
mapping 1.0.0.127 to localhost @@ -176,6 +178,7 @@ public void testRDNS() throws Exception { + " Loopback=" + localhost.isLoopbackAddress() + " Linklocal=" + localhost.isLinkLocalAddress()); } + Assume.assumeNoException(e); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index 84899e519b370..e92a25ea0ed8f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -28,6 +28,8 @@ import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.api.ACLProvider; +import org.apache.curator.framework.api.CreateBuilder; +import org.apache.curator.framework.api.ProtectACLCreateModeStatPathAndBytesable; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; @@ -38,6 +40,8 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; @@ -535,5 +539,38 @@ public void testCreatingParentContainersIfNeeded() throws Exception { // Check if the created NameSpace exists. Stat stat = curatorFramework.checkExists().forPath(workingPath); Assert.assertNotNull(stat); + + tm1.destroy(); + curatorFramework.close(); + } + + @Test + public void testCreateNameSpaceRepeatedly() throws Exception { + + String connectString = zkServer.getConnectString(); + RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); + Configuration conf = getSecretConf(connectString); + CuratorFramework curatorFramework = + CuratorFrameworkFactory.builder(). + connectString(connectString). + retryPolicy(retryPolicy). + build(); + curatorFramework.start(); + + String workingPath = "/" + conf.get(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH, + ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT) + "/ZKDTSMRoot-Test"; + CreateBuilder createBuilder = curatorFramework.create(); + ProtectACLCreateModeStatPathAndBytesable createModeStat = + createBuilder.creatingParentContainersIfNeeded(); + createModeStat.forPath(workingPath); + + // Check if the created NameSpace exists. 
+ Stat stat = curatorFramework.checkExists().forPath(workingPath); + Assert.assertNotNull(stat); + + // Repeated creation will throw NodeExists exception + LambdaTestUtils.intercept(KeeperException.class, + "KeeperErrorCode = NodeExists for "+workingPath, + () -> createModeStat.forPath(workingPath)); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java index 7b33a644fbe76..a585dbc6b20b0 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java @@ -18,6 +18,7 @@ package org.apache.hadoop.portmap; import java.util.concurrent.ConcurrentHashMap; +import java.util.Map; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -54,7 +55,7 @@ final class RpcProgramPortmap extends IdleStateHandler { private static final Logger LOG = LoggerFactory.getLogger(RpcProgramPortmap.class); - private final ConcurrentHashMap map = new ConcurrentHashMap(); + private final ConcurrentHashMap map = new ConcurrentHashMap<>(); /** ChannelGroup that remembers all active channels for gracefully shutdown. */ private final ChannelGroup allChannels; @@ -208,4 +209,8 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable t) { LOG.warn("Encountered ", t); ctx.channel().close(); } + + public Map getMap() { + return map; + } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java index 8ebf9d03c6c30..84fa71a269d71 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java @@ -31,7 +31,6 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.apache.hadoop.test.Whitebox; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -76,7 +75,7 @@ public void testIdle() throws InterruptedException, IOException { } @Test(timeout = 10000) - public void testRegistration() throws IOException, InterruptedException { + public void testRegistration() throws IOException, InterruptedException, IllegalAccessException { XDR req = new XDR(); RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, @@ -100,9 +99,7 @@ public void testRegistration() throws IOException, InterruptedException { // Give the server a chance to process the request Thread.sleep(100); boolean found = false; - @SuppressWarnings("unchecked") - Map map = (Map) Whitebox - .getInternalState(pm.getHandler(), "map"); + Map map = pm.getHandler().getMap(); for (PortmapMapping m : map.values()) { if (m.getPort() == sent.getPort() diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java index 979e7504a872b..65c6c34eb2ff6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -30,6 +30,10 @@ public interface FederationRPCMBean {

   long getProxyOps();

+  long getActiveProxyOps();
+
+  long getObserverProxyOps();
+
   double getProxyAvg();

   long getProcessingOps();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 823bc7b8af21c..5d5f9fb8aa12a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -49,7 +50,10 @@ public class FederationRPCMetrics implements FederationRPCMBean {
   private MutableRate proxy;
   @Metric("Number of operations the Router proxied to a Namenode")
   private MutableCounterLong proxyOp;
-
+  @Metric("Number of operations the Router proxied to an Active Namenode")
+  private MutableCounterLong activeProxyOp;
+  @Metric("Number of operations the Router proxied to an Observer Namenode")
+  private MutableCounterLong observerProxyOp;
   @Metric("Number of operations to hit a standby NN")
   private MutableCounterLong proxyOpFailureStandby;
   @Metric("Number of operations to fail to reach NN")
@@ -256,9 +260,15 @@ public String getAsyncCallerPool() {
    * Add the time to proxy an operation from the moment the Router sends it to
    * the Namenode until it replied.
    * @param time Proxy time of an operation in nanoseconds.
+   * @param state NameNode state. May be null.
    */
-  public void addProxyTime(long time) {
+  public void addProxyTime(long time, FederationNamenodeServiceState state) {
     proxy.add(time);
+    if (FederationNamenodeServiceState.ACTIVE == state) {
+      activeProxyOp.incr();
+    } else if (FederationNamenodeServiceState.OBSERVER == state) {
+      observerProxyOp.incr();
+    }
     proxyOp.incr();
   }

@@ -272,6 +282,16 @@ public long getProxyOps() {
     return proxyOp.value();
   }

+  @Override
+  public long getActiveProxyOps() {
+    return activeProxyOp.value();
+  }
+
+  @Override
+  public long getObserverProxyOps() {
+    return observerProxyOp.value();
+  }
+
   /**
    * Add the time to process a request in the Router from the time we receive
    * the call until we send it to the Namenode.
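[Editor's note] The counters above only split by namenode state when the caller supplies one; a null state still counts toward the aggregate proxyOp. A minimal sketch of the intended accounting, assuming a freshly created metrics instance and using only the methods visible in this hunk (the demo class and the literal timings are hypothetical):

```java
import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;

// Sketch only: exercises addProxyTime() with each state and checks the
// counters introduced by this patch.
public final class ProxyOpAccountingSketch {
  public static void demo(FederationRPCMetrics metrics) {
    metrics.addProxyTime(1200L, FederationNamenodeServiceState.ACTIVE);
    metrics.addProxyTime(800L, FederationNamenodeServiceState.OBSERVER);
    metrics.addProxyTime(950L, null); // unknown state: only proxyOp moves
    assert metrics.getProxyOps() == 3;         // every proxied operation
    assert metrics.getActiveProxyOps() == 1;   // ACTIVE-bound operations only
    assert metrics.getObserverProxyOps() == 1; // OBSERVER-bound operations only
  }
}
```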
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 159d08e26a161..b57fa070546e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -29,6 +29,7 @@

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
@@ -147,12 +148,13 @@ public long proxyOp() {
   }

   @Override
-  public void proxyOpComplete(boolean success, String nsId) {
+  public void proxyOpComplete(boolean success, String nsId,
+      FederationNamenodeServiceState state) {
     if (success) {
       long proxyTime = getProxyTime();
       if (proxyTime >= 0) {
         if (metrics != null) {
-          metrics.addProxyTime(proxyTime);
+          metrics.addProxyTime(proxyTime, state);
         }
         if (nameserviceRPCMetricsMap != null &&
             nameserviceRPCMetricsMap.containsKey(nsId)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
index a9d761f45d289..e1068394f6f42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java
@@ -886,7 +886,7 @@ private List getActiveNamenodeRegistrations()
       // Fetch the most recent namenode registration
       String nsId = nsInfo.getNameserviceId();
       List<? extends FederationNamenodeContext> nns =
-          namenodeResolver.getNamenodesForNameserviceId(nsId);
+          namenodeResolver.getNamenodesForNameserviceId(nsId, false);
       if (nns != null) {
         FederationNamenodeContext nn = nns.get(0);
         if (nn instanceof MembershipState) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
index f06df70b517cf..cae1f478604d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
@@ -43,6 +43,17 @@
 @InterfaceStability.Evolving
 public interface ActiveNamenodeResolver {

+  /**
+   * Report a failed, unavailable NN address for a nameservice or blockPool.
+   *
+   * @param ns Nameservice identifier.
+   * @param failedAddress The address that failed to respond to the command.
+   *
+   * @throws IOException If the state store cannot be accessed.
+   */
+  void updateUnavailableNamenode(
+      String ns, InetSocketAddress failedAddress) throws IOException;
+
   /**
    * Report a successful, active NN address for a nameservice or blockPool.
    *
@@ -56,20 +67,30 @@ void updateActiveNamenode(

   /**
    * Returns a prioritized list of the most recent cached registration entries
-   * for a single nameservice ID.
-   * Returns an empty list if none are found. Returns entries in preference of:
+   * for a single nameservice ID. Returns an empty list if none are found.
+   * In the case of not observerRead, returns entries in preference of:
   * <ul>
   * <li>The most recent ACTIVE NN
+  * <li>The most recent OBSERVER NN
+  * <li>The most recent STANDBY NN
+  * <li>The most recent UNAVAILABLE NN
+  * </ul>
+  *
+  * In the case of observerRead, returns entries in preference of:
+  * <ul>
+  * <li>The most recent OBSERVER NN
+  * <li>The most recent ACTIVE NN
   * <li>The most recent STANDBY NN
   * <li>The most recent UNAVAILABLE NN
   * </ul>
   *
   * @param nameserviceId Nameservice identifier.
+  * @param listObserversFirst Observer read case, observer NN will be ranked first.
   * @return Prioritized list of namenode contexts.
   * @throws IOException If the state store cannot be accessed.
   */
-  List<? extends FederationNamenodeContext>
-      getNamenodesForNameserviceId(String nameserviceId) throws IOException;
+  List<? extends FederationNamenodeContext> getNamenodesForNameserviceId(
+      String nameserviceId, boolean listObserversFirst) throws IOException;

  /**
   * Returns a prioritized list of the most recent cached registration entries
@@ -77,6 +98,7 @@ void updateActiveNamenode(
   * Returns an empty list if none are found. Returns entries in preference of:
   * <ul>
   * <li>The most recent ACTIVE NN
+  * <li>The most recent OBSERVER NN
   * <li>The most recent STANDBY NN
   * <li>The most recent UNAVAILABLE NN
   * </ul>
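[Editor's note] A compact illustration of the ordering contract the new listObserversFirst parameter adds, assuming a resolver that already has one registered namenode in each state; the nameservice ID "ns0" and the demo wrapper are made up:

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;

// Sketch only: the two call shapes and the first-ranked state each should yield.
public final class ResolverOrderingSketch {
  public static void demo(ActiveNamenodeResolver resolver) throws IOException {
    // Observer read path: OBSERVER namenodes are ranked first.
    List<? extends FederationNamenodeContext> readOrder =
        resolver.getNamenodesForNameserviceId("ns0", true);
    assert readOrder.get(0).getState() == FederationNamenodeServiceState.OBSERVER;

    // Write / non-observer path: ACTIVE namenodes are ranked first.
    List<? extends FederationNamenodeContext> writeOrder =
        resolver.getNamenodesForNameserviceId("ns0", false);
    assert writeOrder.get(0).getState() == FederationNamenodeServiceState.ACTIVE;
  }
}
```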
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java index 9f0f78067aedd..d65ebdb628ef7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.ACTIVE; import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.EXPIRED; +import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.OBSERVER; import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.UNAVAILABLE; import java.io.IOException; @@ -32,6 +33,7 @@ import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; @@ -73,8 +75,11 @@ public class MembershipNamenodeResolver /** Parent router ID. */ private String routerId; - /** Cached lookup of NN for nameservice. Invalidated on cache refresh. */ - private Map> cacheNS; + /** Cached lookup of namenodes for nameservice. The keys are a pair of the nameservice + * name and a boolean indicating if observer namenodes should be listed first. + * If true, observer namenodes are listed first. If false, active namenodes are listed first. + * Invalidated on cache refresh. */ + private Map, List> cacheNS; /** Cached lookup of NN for block pool. Invalidated on cache refresh. */ private Map> cacheBP; @@ -136,11 +141,21 @@ public boolean loadCache(boolean force) { return true; } + @Override public void updateUnavailableNamenode(String nsId, + InetSocketAddress address) throws IOException { + updateNameNodeState(nsId, address, UNAVAILABLE); + } + @Override public void updateActiveNamenode( final String nsId, final InetSocketAddress address) throws IOException { + updateNameNodeState(nsId, address, ACTIVE); + } - // Called when we have an RPC miss and successful hit on an alternate NN. + + private void updateNameNodeState(final String nsId, + final InetSocketAddress address, FederationNamenodeServiceState state) + throws IOException { // Temporarily update our cache, it will be overwritten on the next update. try { MembershipState partial = MembershipState.newInstance(); @@ -160,10 +175,11 @@ public void updateActiveNamenode( MembershipState record = records.get(0); UpdateNamenodeRegistrationRequest updateRequest = UpdateNamenodeRegistrationRequest.newInstance( - record.getNameserviceId(), record.getNamenodeId(), ACTIVE); + record.getNameserviceId(), record.getNamenodeId(), state); membership.updateNamenodeRegistration(updateRequest); - cacheNS.remove(nsId); + cacheNS.remove(Pair.of(nsId, Boolean.TRUE)); + cacheNS.remove(Pair.of(nsId, Boolean.FALSE)); // Invalidating the full cacheBp since getting the blockpool id from // namespace id is quite costly. 
      cacheBP.clear();
@@ -175,9 +191,9 @@ public void updateActiveNamenode(

   @Override
   public List<? extends FederationNamenodeContext> getNamenodesForNameserviceId(
-      final String nsId) throws IOException {
+      final String nsId, boolean listObserversFirst) throws IOException {

-    List<? extends FederationNamenodeContext> ret = cacheNS.get(nsId);
+    List<? extends FederationNamenodeContext> ret =
+        cacheNS.get(Pair.of(nsId, listObserversFirst));
     if (ret != null) {
       return ret;
     }
@@ -189,7 +205,8 @@ public List getNamenodesForNameserviceId(
       partial.setNameserviceId(nsId);
       GetNamenodeRegistrationsRequest request =
           GetNamenodeRegistrationsRequest.newInstance(partial);
-      result = getRecentRegistrationForQuery(request, true, false);
+      result = getRecentRegistrationForQuery(request, true,
+          false, listObserversFirst);
     } catch (StateStoreUnavailableException e) {
       LOG.error("Cannot get active NN for {}, State Store unavailable", nsId);
       return null;
@@ -218,7 +235,7 @@ public List getNamenodesForNameserviceId(
     // Cache the response
     ret = Collections.unmodifiableList(result);
-    cacheNS.put(nsId, result);
+    cacheNS.put(Pair.of(nsId, listObserversFirst), result);
     return ret;
   }

@@ -235,7 +252,7 @@ public List getNamenodesForBlockPoolId(
         GetNamenodeRegistrationsRequest.newInstance(partial);

     final List<MembershipState> result =
-        getRecentRegistrationForQuery(request, true, false);
+        getRecentRegistrationForQuery(request, true, false, false);
     if (result == null || result.isEmpty()) {
       LOG.error("Cannot locate eligible NNs for {}", bpId);
     } else {
@@ -346,22 +363,34 @@ public Set getDisabledNamespaces() throws IOException {
   }

   /**
-   * Picks the most relevant record registration that matches the query. Return
-   * registrations matching the query in this preference: 1) Most recently
-   * updated ACTIVE registration 2) Most recently updated STANDBY registration
-   * (if showStandby) 3) Most recently updated UNAVAILABLE registration (if
-   * showUnavailable). EXPIRED registrations are ignored.
+   * Picks the most relevant record registration that matches the query.
+   * If not an observer read,
+   * return registrations matching the query in this preference:
+   * 1) Most recently updated ACTIVE registration
+   * 2) Most recently updated OBSERVER registration
+   * 3) Most recently updated STANDBY registration (if showStandby)
+   * 4) Most recently updated UNAVAILABLE registration (if showUnavailable).
+   *
+   * If an observer read,
+   * return registrations matching the query in this preference:
+   * 1) OBSERVER registrations, shuffled to disperse queries.
+   * 2) Most recently updated ACTIVE registration
+   * 3) Most recently updated STANDBY registration (if showStandby)
+   * 4) Most recently updated UNAVAILABLE registration (if showUnavailable).
+   *
+   * EXPIRED registrations are ignored.
    *
    * @param request The select query for NN registrations.
    * @param addUnavailable include UNAVAILABLE registrations.
    * @param addExpired include EXPIRED registrations.
+   * @param observerRead Observer read case, observer NN will be ranked first.
    * @return List of memberships or null if no registrations that
    *         both match the query AND the selected states.
    * @throws IOException
    */
   private List<MembershipState> getRecentRegistrationForQuery(
       GetNamenodeRegistrationsRequest request, boolean addUnavailable,
-      boolean addExpired) throws IOException {
+      boolean addExpired, boolean observerRead) throws IOException {

     // Retrieve a list of all registrations that match this query.
// This may include all NN records for a namespace/blockpool, including @@ -371,24 +400,34 @@ private List getRecentRegistrationForQuery( membershipStore.getNamenodeRegistrations(request); List memberships = response.getNamenodeMemberships(); - if (!addExpired || !addUnavailable) { - Iterator iterator = memberships.iterator(); - while (iterator.hasNext()) { - MembershipState membership = iterator.next(); - if (membership.getState() == EXPIRED && !addExpired) { - iterator.remove(); - } else if (membership.getState() == UNAVAILABLE && !addUnavailable) { - iterator.remove(); - } + List observerMemberships = new ArrayList<>(); + Iterator iterator = memberships.iterator(); + while (iterator.hasNext()) { + MembershipState membership = iterator.next(); + if (membership.getState() == EXPIRED && !addExpired) { + iterator.remove(); + } else if (membership.getState() == UNAVAILABLE && !addUnavailable) { + iterator.remove(); + } else if (membership.getState() == OBSERVER && observerRead) { + iterator.remove(); + observerMemberships.add(membership); } } - List priorityList = new ArrayList<>(); - priorityList.addAll(memberships); - Collections.sort(priorityList, new NamenodePriorityComparator()); + memberships.sort(new NamenodePriorityComparator()); + if(observerRead) { + List ret = new ArrayList<>( + memberships.size() + observerMemberships.size()); + if(observerMemberships.size() > 1) { + Collections.shuffle(observerMemberships); + } + ret.addAll(observerMemberships); + ret.addAll(memberships); + memberships = ret; + } - LOG.debug("Selected most recent NN {} for query", priorityList); - return priorityList; + LOG.debug("Selected most recent NN {} for query", memberships); + return memberships; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index 9a9abff0677ba..ef3580b35d6f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -218,7 +218,7 @@ public AtomicInteger getClientIndex() { } /** - * Get the alignment context for this pool + * Get the alignment context for this pool. 
* @return Alignment context */ public PoolAlignmentContext getPoolAlignmentContext() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 24a85c2d558b3..c598076f636e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -191,6 +191,12 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { FEDERATION_STORE_PREFIX + "enable"; public static final boolean DFS_ROUTER_STORE_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_OBSERVER_READ_DEFAULT_KEY = + FEDERATION_ROUTER_PREFIX + "observer.read.default"; + public static final boolean DFS_ROUTER_OBSERVER_READ_DEFAULT_VALUE = false; + public static final String DFS_ROUTER_OBSERVER_READ_OVERRIDES = + FEDERATION_ROUTER_PREFIX + "observer.read.overrides"; + public static final String DFS_ROUTER_OBSERVER_FEDERATED_STATE_PROPAGATION_MAXSIZE = FEDERATION_ROUTER_PREFIX + "observer.federated.state.propagation.maxsize"; public static final int DFS_ROUTER_OBSERVER_FEDERATED_STATE_PROPAGATION_MAXSIZE_DEFAULT = 5; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 9d3973d450bdd..a5f83c95b7baf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1918,7 +1918,10 @@ public BatchedEntries listOpenFiles(long prevId, @Override public void msync() throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); + rpcServer.checkOperation(NameNode.OperationCategory.READ, true); + Set nss = namenodeResolver.getNamespaces(); + RemoteMethod method = new RemoteMethod("msync"); + rpcClient.invokeConcurrent(nss, method); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 918a46f80ca05..62ae4b0b95de7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -38,6 +38,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; @@ -70,16 +71,19 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import 
org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; import org.apache.hadoop.ipc.CallerContext; +import org.apache.hadoop.ipc.ObserverRetryOnActiveException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server.Call; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.ConnectTimeoutException; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.eclipse.jetty.util.ajax.JSON; @@ -128,6 +132,10 @@ public class RouterRpcClient { private final RouterRpcMonitor rpcMonitor; /** Field separator of CallerContext. */ private final String contextFieldSeparator; + /** Observer read enabled. Default for all nameservices. */ + private final boolean observerReadEnabledDefault; + /** Nameservice specific overrides of the default setting for enabling observer reads. */ + private HashSet observerReadEnabledOverrides = new HashSet<>(); /** Pattern to parse a stack trace line. */ private static final Pattern STACK_TRACE_PATTERN = @@ -200,6 +208,16 @@ public RouterRpcClient(Configuration conf, Router router, failoverSleepBaseMillis, failoverSleepMaxMillis); String[] ipProxyUsers = conf.getStrings(DFS_NAMENODE_IP_PROXY_USERS); this.enableProxyUser = ipProxyUsers != null && ipProxyUsers.length > 0; + this.observerReadEnabledDefault = conf.getBoolean( + RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_DEFAULT_KEY, + RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_DEFAULT_VALUE); + String[] observerReadOverrides = conf.getStrings(RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_OVERRIDES); + if (observerReadOverrides != null) { + observerReadEnabledOverrides.addAll(Arrays.asList(observerReadOverrides)); + } + if (this.observerReadEnabledDefault) { + LOG.info("Observer read is enabled for router."); + } } /** @@ -451,6 +469,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, * @param ugi User group information. * @param namenodes A prioritized list of namenodes within the same * nameservice. + * @param useObserver Whether to use observer namenodes. * @param method Remote ClientProtocol method to invoke. * @param params Variable list of parameters matching the method. * @return The result of invoking the method. @@ -462,6 +481,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, public Object invokeMethod( final UserGroupInformation ugi, final List namenodes, + boolean useObserver, final Class protocol, final Method method, final Object... 
params) throws ConnectException, StandbyException, IOException { @@ -478,8 +498,12 @@ public Object invokeMethod( rpcMonitor.proxyOp(); } boolean failover = false; + boolean shouldUseObserver = useObserver; Map ioes = new LinkedHashMap<>(); for (FederationNamenodeContext namenode : namenodes) { + if (!shouldUseObserver && (namenode.getState() == FederationNamenodeServiceState.OBSERVER)) { + continue; + } ConnectionContext connection = null; String nsId = namenode.getNameserviceId(); String rpcAddress = namenode.getRpcAddress(); @@ -489,13 +513,14 @@ public Object invokeMethod( final Object proxy = client.getProxy(); ret = invoke(nsId, 0, method, proxy, params); - if (failover) { + if (failover && + FederationNamenodeServiceState.OBSERVER != namenode.getState()) { // Success on alternate server, update InetSocketAddress address = client.getAddress(); namenodeResolver.updateActiveNamenode(nsId, address); } if (this.rpcMonitor != null) { - this.rpcMonitor.proxyOpComplete(true, nsId); + this.rpcMonitor.proxyOpComplete(true, nsId, namenode.getState()); } if (this.router.getRouterClientMetrics() != null) { this.router.getRouterClientMetrics().incInvokedMethod(method); @@ -503,7 +528,11 @@ public Object invokeMethod( return ret; } catch (IOException ioe) { ioes.put(namenode, ioe); - if (ioe instanceof StandbyException) { + if (ioe instanceof ObserverRetryOnActiveException) { + LOG.info("Encountered ObserverRetryOnActiveException from {}." + + " Retry active namenode directly.", namenode); + shouldUseObserver = false; + } else if (ioe instanceof StandbyException) { // Fail over indicated by retry policy and/or NN if (this.rpcMonitor != null) { this.rpcMonitor.proxyOpFailureStandby(nsId); @@ -513,10 +542,15 @@ public Object invokeMethod( if (this.rpcMonitor != null) { this.rpcMonitor.proxyOpFailureCommunicate(nsId); } - failover = true; + if (FederationNamenodeServiceState.OBSERVER == namenode.getState()) { + namenodeResolver.updateUnavailableNamenode(nsId, + NetUtils.createSocketAddr(namenode.getRpcAddress())); + } else { + failover = true; + } } else if (ioe instanceof RemoteException) { if (this.rpcMonitor != null) { - this.rpcMonitor.proxyOpComplete(true, nsId); + this.rpcMonitor.proxyOpComplete(true, nsId, namenode.getState()); } RemoteException re = (RemoteException) ioe; ioe = re.unwrapRemoteException(); @@ -546,7 +580,7 @@ public Object invokeMethod( // Communication retries are handled by the retry policy if (this.rpcMonitor != null) { this.rpcMonitor.proxyOpFailureCommunicate(nsId); - this.rpcMonitor.proxyOpComplete(false, nsId); + this.rpcMonitor.proxyOpComplete(false, nsId, namenode.getState()); } throw ioe; } @@ -557,7 +591,7 @@ public Object invokeMethod( } } if (this.rpcMonitor != null) { - this.rpcMonitor.proxyOpComplete(false, null); + this.rpcMonitor.proxyOpComplete(false, null, null); } // All namenodes were unavailable or in standby @@ -640,16 +674,12 @@ private void addClientInfoToCallerContext() { * @param params Variable parameters * @return Response from the remote server * @throws IOException - * @throws InterruptedException */ private Object invoke(String nsId, int retryCount, final Method method, final Object obj, final Object... 
params) throws IOException { try { return method.invoke(obj, params); - } catch (IllegalAccessException e) { - LOG.error("Unexpected exception while proxying API", e); - return null; - } catch (IllegalArgumentException e) { + } catch (IllegalAccessException | IllegalArgumentException e) { LOG.error("Unexpected exception while proxying API", e); return null; } catch (InvocationTargetException e) { @@ -713,7 +743,7 @@ public static boolean isUnavailableException(IOException ioe) { */ private boolean isClusterUnAvailable(String nsId) throws IOException { List nnState = this.namenodeResolver - .getNamenodesForNameserviceId(nsId); + .getNamenodesForNameserviceId(nsId, false); if (nnState != null) { for (FederationNamenodeContext nnContext : nnState) { @@ -844,13 +874,13 @@ public Object invokeSingle(final String nsId, RemoteMethod method) RouterRpcFairnessPolicyController controller = getRouterRpcFairnessPolicyController(); acquirePermit(nsId, ugi, method, controller); try { - List nns = - getNamenodesForNameservice(nsId); + boolean isObserverRead = isObserverReadEligible(nsId, method.getMethod()); + List nns = getOrderedNamenodes(nsId, isObserverRead); RemoteLocationContext loc = new RemoteLocation(nsId, "/", "/"); Class proto = method.getProtocol(); Method m = method.getMethod(); Object[] params = method.getParams(loc); - return invokeMethod(ugi, nns, proto, m, params); + return invokeMethod(ugi, nns, isObserverRead, proto, m, params); } finally { releasePermit(nsId, ugi, method, controller); } @@ -927,7 +957,7 @@ public T invokeSingle(final RemoteLocationContext location, * @throws IOException if the success condition is not met and one of the RPC * calls generated a remote exception. */ - public Object invokeSequential( + public T invokeSequential( final List locations, final RemoteMethod remoteMethod) throws IOException { return invokeSequential(locations, remoteMethod, null, null); @@ -1012,12 +1042,14 @@ public RemoteResult invokeSequential( for (final RemoteLocationContext loc : locations) { String ns = loc.getNameserviceId(); acquirePermit(ns, ugi, remoteMethod, controller); + boolean isObserverRead = isObserverReadEligible(ns, m); List namenodes = - getNamenodesForNameservice(ns); + getOrderedNamenodes(ns, isObserverRead); try { Class proto = remoteMethod.getProtocol(); Object[] params = remoteMethod.getParams(loc); - Object result = invokeMethod(ugi, namenodes, proto, m, params); + Object result = invokeMethod( + ugi, namenodes, isObserverRead, proto, m, params); // Check if the result is what we expected if (isExpectedClass(expectedResultClass, result) && isExpectedValue(expectedResultValue, result)) { @@ -1373,12 +1405,14 @@ public Map invokeConcurrent( String ns = location.getNameserviceId(); RouterRpcFairnessPolicyController controller = getRouterRpcFairnessPolicyController(); acquirePermit(ns, ugi, method, controller); + boolean isObserverRead = isObserverReadEligible(ns, m); final List namenodes = - getNamenodesForNameservice(ns); + getOrderedNamenodes(ns, isObserverRead); try { Class proto = method.getProtocol(); Object[] paramList = method.getParams(location); - R result = (R) invokeMethod(ugi, namenodes, proto, m, paramList); + R result = (R) invokeMethod( + ugi, namenodes, isObserverRead, proto, m, paramList); RemoteResult remoteResult = new RemoteResult<>(location, result); return Collections.singletonList(remoteResult); } catch (IOException ioe) { @@ -1396,8 +1430,9 @@ public Map invokeConcurrent( final CallerContext originContext = CallerContext.getCurrent(); for (final 
T location : locations) { String nsId = location.getNameserviceId(); + boolean isObserverRead = isObserverReadEligible(nsId, m); final List namenodes = - getNamenodesForNameservice(nsId); + getOrderedNamenodes(nsId, isObserverRead); final Class proto = method.getProtocol(); final Object[] paramList = method.getParams(location); if (standby) { @@ -1414,7 +1449,8 @@ public Map invokeConcurrent( callables.add( () -> { transferThreadLocalContext(originCall, originContext); - return invokeMethod(ugi, nnList, proto, m, paramList); + return invokeMethod( + ugi, nnList, isObserverRead, proto, m, paramList); }); } } else { @@ -1423,7 +1459,8 @@ public Map invokeConcurrent( callables.add( () -> { transferThreadLocalContext(originCall, originContext); - return invokeMethod(ugi, namenodes, proto, m, paramList); + return invokeMethod( + ugi, namenodes, isObserverRead, proto, m, paramList); }); } } @@ -1512,27 +1549,6 @@ private void transferThreadLocalContext( CallerContext.setCurrent(originContext); } - /** - * Get a prioritized list of NNs that share the same nameservice ID (in the - * same namespace). NNs that are reported as ACTIVE will be first in the list. - * - * @param nsId The nameservice ID for the namespace. - * @return A prioritized list of NNs to use for communication. - * @throws IOException If a NN cannot be located for the nameservice ID. - */ - private List getNamenodesForNameservice( - final String nsId) throws IOException { - - final List namenodes = - namenodeResolver.getNamenodesForNameserviceId(nsId); - - if (namenodes == null || namenodes.isEmpty()) { - throw new IOException("Cannot locate a registered namenode for " + nsId + - " from " + router.getRouterId()); - } - return namenodes; - } - /** * Get a prioritized list of NNs that share the same block pool ID (in the * same namespace). NNs that are reported as ACTIVE will be first in the list. @@ -1670,4 +1686,48 @@ private String getCurrentFairnessPolicyControllerClassName() { } return null; } + + /** + * Get a prioritized list of NNs that share the same nameservice ID (in the + * same namespace). + * In observer read case, OBSERVER NNs will be first in the list. + * Otherwise, ACTIVE NNs will be first in the list. + * + * @param nsId The nameservice ID for the namespace. + * @param isObserverRead Read on observer namenode. + * @return A prioritized list of NNs to use for communication. + * @throws IOException If a NN cannot be located for the nameservice ID. + */ + private List getOrderedNamenodes(String nsId, + boolean isObserverRead) throws IOException { + final List namenodes; + + if (RouterStateIdContext.getClientStateIdFromCurrentCall(nsId) > Long.MIN_VALUE) { + namenodes = namenodeResolver.getNamenodesForNameserviceId(nsId, isObserverRead); + } else { + namenodes = namenodeResolver.getNamenodesForNameserviceId(nsId, false); + } + + if (namenodes == null || namenodes.isEmpty()) { + throw new IOException("Cannot locate a registered namenode for " + nsId + + " from " + router.getRouterId()); + } + return namenodes; + } + + private boolean isObserverReadEligible(String nsId, Method method) { + boolean isReadEnabledForNamespace = observerReadEnabledDefault != observerReadEnabledOverrides.contains(nsId); + return isReadEnabledForNamespace && isReadCall(method); + } + + /** + * Check if a method is read-only. + * @return whether the 'method' is a read-only operation. 
+ */ + private static boolean isReadCall(Method method) { + if (!method.isAnnotationPresent(ReadOnly.class)) { + return false; + } + return !method.getAnnotationsByType(ReadOnly.class)[0].activeOnly(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java index 039b40ae2e585..256f03f12ff38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; /** @@ -61,8 +62,9 @@ void init( /** * Mark a proxy operation as completed. * @param success If the operation was successful. + * @param state proxy namenode state. */ - void proxyOpComplete(boolean success, String nsId); + void proxyOpComplete(boolean success, String nsId, FederationNamenodeServiceState state); /** * Failed to proxy an operation to a Namenode because it was in standby. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 86fda12307cec..c4173163436ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1331,7 +1331,7 @@ public void modifyAclEntries(String src, List aclSpec) clientProto.modifyAclEntries(src, aclSpec); } - @Override // ClienProtocol + @Override // ClientProtocol public void removeAclEntries(String src, List aclSpec) throws IOException { clientProto.removeAclEntries(src, aclSpec); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml index 7c0cb8b437024..52a1e3a3bd1e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml @@ -835,6 +835,25 @@ + + dfs.federation.router.observer.read.default + false + + Whether observer reads are enabled. This is a default for all nameservices. + The default can be inverted for individual namespaces by adding them to + dfs.federation.router.observer.read.overrides. + + + + + dfs.federation.router.observer.read.overrides + + + Comma-separated list of namespaces for which to invert the default configuration, + dfs.federation.router.observer.read.default, for whether to enable observer reads. 
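(Editorial, hedged illustration: the two keys above combine by letting the override list invert the per-nameservice default, matching the `observerReadEnabledDefault != observerReadEnabledOverrides.contains(nsId)` check this patch adds to RouterRpcClient. The class and method names below are made up for the sketch and are not part of the router code.)

```java
import java.util.Set;

// Sketch of the intended semantics of observer.read.default/.overrides.
class ObserverReadFlags {
  private final boolean readEnabledDefault; // ...observer.read.default
  private final Set<String> readOverrides;  // ...observer.read.overrides

  ObserverReadFlags(boolean readEnabledDefault, Set<String> readOverrides) {
    this.readEnabledDefault = readEnabledDefault;
    this.readOverrides = readOverrides;
  }

  // XOR: listing a nameservice in the overrides flips the default for it.
  boolean isObserverReadEnabled(String nsId) {
    return readEnabledDefault != readOverrides.contains(nsId);
  }
}
```

For example, with the default set to false and overrides set to "ns0", observer reads are enabled only for ns0; with the default set to true, the same override disables them only for ns0.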
+ + + dfs.federation.router.observer.federated.state.propagation.maxsize 5 diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 4c6b151bcff10..5a9c2fd42855c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -46,7 +46,6 @@ This approach has the same architecture as [YARN federation](../../hadoop-yarn/h ### Example flow The simplest configuration deploys a Router on each NameNode machine. The Router monitors the local NameNode and its state and heartbeats to the State Store. -The Router monitors the local NameNode and heartbeats the state to the State Store. When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file. Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster. After it has identified the correct NameNode, the Router proxies the request. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index 79c28986c33f1..b0a897d9f4bb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -175,7 +175,7 @@ public static void waitNamenodeRegistered( GenericTestUtils.waitFor(() -> { try { List namenodes = - resolver.getNamenodesForNameserviceId(nsId); + resolver.getNamenodesForNameserviceId(nsId, false); if (namenodes != null) { for (FederationNamenodeContext namenode : namenodes) { // Check if this is the Namenode we are checking @@ -207,7 +207,7 @@ public static void waitNamenodeRegistered( GenericTestUtils.waitFor(() -> { try { List nns = - resolver.getNamenodesForNameserviceId(nsId); + resolver.getNamenodesForNameserviceId(nsId, false); for (FederationNamenodeContext nn : nns) { if (nn.getState().equals(state)) { return true; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 53247262cefb1..4fcdf6595e4ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -806,6 +806,7 @@ public void startCluster(Configuration overrideConf) { .numDataNodes(numDNs) .nnTopology(topology) .dataNodeConfOverlays(dnConfs) + .checkExitOnShutdown(false) .storageTypes(storageTypes) .racks(racks) .build(); @@ -1038,6 +1039,27 @@ public void switchToStandby(String nsId, String nnId) { } } + /** + * Switch a namenode in a nameservice to be the observer. + * @param nsId Nameservice identifier. + * @param nnId Namenode identifier. 
+ */ + public void switchToObserver(String nsId, String nnId) { + try { + int total = cluster.getNumNameNodes(); + NameNodeInfo[] nns = cluster.getNameNodeInfos(); + for (int i = 0; i < total; i++) { + NameNodeInfo nn = nns[i]; + if (nn.getNameserviceId().equals(nsId) && + nn.getNamenodeId().equals(nnId)) { + cluster.transitionToObserver(i); + } + } + } catch (Throwable e) { + LOG.error("Cannot transition to observer", e); + } + } + /** * Stop the federated HDFS cluster. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index f8f6ccef36374..4aaa8e7569e88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -119,12 +120,24 @@ public void setDisableRegistration(boolean isDisable) { disableRegistration = isDisable; } + @Override public void updateUnavailableNamenode(String ns, + InetSocketAddress failedAddress) throws IOException { + updateNameNodeState(ns, failedAddress, + FederationNamenodeServiceState.UNAVAILABLE); + } + @Override public void updateActiveNamenode( String nsId, InetSocketAddress successfulAddress) { + updateNameNodeState(nsId, successfulAddress, + FederationNamenodeServiceState.ACTIVE); + } - String address = successfulAddress.getHostName() + ":" + - successfulAddress.getPort(); + private void updateNameNodeState(String nsId, + InetSocketAddress iAddr, + FederationNamenodeServiceState state) { + String sAddress = iAddr.getHostName() + ":" + + iAddr.getPort(); String key = nsId; if (key != null) { // Update the active entry @@ -132,9 +145,9 @@ public void updateActiveNamenode( List namenodes = (List) this.resolver.get(key); for (FederationNamenodeContext namenode : namenodes) { - if (namenode.getRpcAddress().equals(address)) { + if (namenode.getRpcAddress().equals(sAddress)) { MockNamenodeContext nn = (MockNamenodeContext) namenode; - nn.setState(FederationNamenodeServiceState.ACTIVE); + nn.setState(state); break; } } @@ -147,14 +160,39 @@ public void updateActiveNamenode( @Override public synchronized List - getNamenodesForNameserviceId(String nameserviceId) { + getNamenodesForNameserviceId(String nameserviceId, boolean observerRead) { // Return a copy of the list because it is updated periodically List namenodes = this.resolver.get(nameserviceId); if (namenodes == null) { namenodes = new ArrayList<>(); } - return Collections.unmodifiableList(new ArrayList<>(namenodes)); + + List ret = new ArrayList<>(); + + if (observerRead) { + Iterator iterator = namenodes + .iterator(); + List observerNN = new ArrayList<>(); + List nonObserverNN = new ArrayList<>(); + while (iterator.hasNext()) { + FederationNamenodeContext membership = iterator.next(); + if (membership.getState() == FederationNamenodeServiceState.OBSERVER) { + observerNN.add(membership); + } else { + nonObserverNN.add(membership); + } + } + Collections.shuffle(observerNN); + Collections.sort(nonObserverNN, new NamenodePriorityComparator()); + ret.addAll(observerNN); + ret.addAll(nonObserverNN); + } else { + ret.addAll(namenodes); + Collections.sort(ret, new 
NamenodePriorityComparator()); + } + + return Collections.unmodifiableList(ret); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 065209060220e..0741f1aed441a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -161,7 +161,8 @@ public void testRefreshStaticChangeHandlers() throws Exception { Thread.sleep(sleepTime); return null; }).when(client) - .invokeMethod(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); + .invokeMethod(Mockito.any(), Mockito.any(), Mockito.anyBoolean(), + Mockito.any(), Mockito.any(), Mockito.any()); // No calls yet assertEquals("{}", diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java index ed10a3a87317d..b602a27c95f60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java @@ -129,7 +129,7 @@ private void verifyFirstRegistration(String nsId, String nnId, int resultsCount, FederationNamenodeServiceState state) throws IOException { List namenodes = - namenodeResolver.getNamenodesForNameserviceId(nsId); + namenodeResolver.getNamenodesForNameserviceId(nsId, false); if (resultsCount == 0) { assertNull(namenodes); } else { @@ -291,8 +291,8 @@ public void testCacheUpdateOnNamenodeStateUpdate() throws IOException { HAServiceState.STANDBY))); stateStore.refreshCaches(true); // Check whether the namenode state is reported correctly as standby. - FederationNamenodeContext namenode = - namenodeResolver.getNamenodesForNameserviceId(NAMESERVICES[0]).get(0); + FederationNamenodeContext namenode = namenodeResolver + .getNamenodesForNameserviceId(NAMESERVICES[0], false).get(0); assertEquals(FederationNamenodeServiceState.STANDBY, namenode.getState()); String rpcAddr = namenode.getRpcAddress(); InetSocketAddress inetAddr = getInetSocketAddress(rpcAddr); @@ -301,8 +301,8 @@ public void testCacheUpdateOnNamenodeStateUpdate() throws IOException { // RouterRpcClient calls updateActiveNamenode to update the state to active. // Check whether the correct updated state is returned post update. 
namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr); - FederationNamenodeContext namenode1 = - namenodeResolver.getNamenodesForNameserviceId(NAMESERVICES[0]).get(0); + FederationNamenodeContext namenode1 = namenodeResolver + .getNamenodesForNameserviceId(NAMESERVICES[0], false).get(0); assertEquals("The namenode state should be ACTIVE post update.", FederationNamenodeServiceState.ACTIVE, namenode1.getState()); } @@ -318,8 +318,8 @@ public void testCacheUpdateOnNamenodeStateUpdateWithIp() InetSocketAddress inetAddr = getInetSocketAddress(rpcAddress); namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr); - FederationNamenodeContext namenode = - namenodeResolver.getNamenodesForNameserviceId(NAMESERVICES[0]).get(0); + FederationNamenodeContext namenode = namenodeResolver + .getNamenodesForNameserviceId(NAMESERVICES[0], false).get(0); assertEquals("The namenode state should be ACTIVE post update.", FederationNamenodeServiceState.ACTIVE, namenode.getState()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java new file mode 100644 index 0000000000000..fbd731c073f4b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java @@ -0,0 +1,425 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.After; +import org.junit.Test; + +public class TestObserverWithRouter { + + private MiniRouterDFSCluster cluster; + + public void startUpCluster(int numberOfObserver) throws Exception { + startUpCluster(numberOfObserver, null); + } + + public void startUpCluster(int numberOfObserver, Configuration confOverrides) throws Exception { + int numberOfNamenode = 2 + numberOfObserver; + Configuration conf = new Configuration(false); + conf.setBoolean(RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_DEFAULT_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true); + conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "0ms"); + if (confOverrides != null) { + conf.addResource(confOverrides); + } + cluster = new MiniRouterDFSCluster(true, 2, numberOfNamenode); + cluster.addNamenodeOverrides(conf); + // Start NNs and DNs and wait until ready + cluster.startCluster(); + + // Making one Namenode active per nameservice + if (cluster.isHighAvailability()) { + for (String ns : cluster.getNameservices()) { + cluster.switchToActive(ns, NAMENODES[0]); + cluster.switchToStandby(ns, NAMENODES[1]); + for (int i = 2; i < numberOfNamenode; i++) { + cluster.switchToObserver(ns, NAMENODES[i]); + } + } + } + + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .build(); + + cluster.addRouterOverrides(conf); + cluster.addRouterOverrides(routerConf); + + // Start routers with only an RPC service + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + // Setup the mount table + cluster.installMockLocations(); + + cluster.waitActiveNamespaces(); + } + + @After + public void teardown() throws IOException { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Test + public void testObserverRead() throws Exception { + startUpCluster(1); + RouterContext routerContext = cluster.getRandomRouter(); + List namenodes = routerContext + .getRouter().getNamenodeResolver() + .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true); + 
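+ // Editorial note: with observer reads enabled,
+ // getNamenodesForNameserviceId(ns, true) is expected to order OBSERVER
+ // namenodes ahead of ACTIVE/STANDBY, which the assertion below relies on.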
assertEquals("First namenode should be observer", namenodes.get(0).getState(), + FederationNamenodeServiceState.OBSERVER); + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile"); + // Send Create call to active + fileSystem.create(path).close(); + + // Send read request to observer + fileSystem.open(path).close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // Create and complete calls should be sent to active + assertEquals("Two calls should be sent to active", 2, rpcCountForActive); + + long rpcCountForObserver = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getObserverProxyOps(); + // getBlockLocations should be sent to observer + assertEquals("One call should be sent to observer", 1, rpcCountForObserver); + fileSystem.close(); + } + + @Test + public void testObserverReadWithoutFederatedStatePropagation() throws Exception { + Configuration confOverrides = new Configuration(false); + confOverrides.setInt(RBFConfigKeys.DFS_ROUTER_OBSERVER_FEDERATED_STATE_PROPAGATION_MAXSIZE, 0); + startUpCluster(1, confOverrides); + RouterContext routerContext = cluster.getRandomRouter(); + List namenodes = routerContext + .getRouter().getNamenodeResolver() + .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true); + assertEquals("First namenode should be observer", namenodes.get(0).getState(), + FederationNamenodeServiceState.OBSERVER); + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile"); + // Send Create call to active + fileSystem.create(path).close(); + + // Send read request to observer. The router will msync to the active namenode. + fileSystem.open(path).close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // Create, complete and getBlockLocations calls should be sent to active + assertEquals("Three calls should be sent to active", 3, rpcCountForActive); + + long rpcCountForObserver = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getObserverProxyOps(); + assertEquals("No call should be sent to observer", 0, rpcCountForObserver); + fileSystem.close(); + } + + @Test + public void testDisablingObserverReadUsingNameserviceOverride() throws Exception { + // Disable observer reads using per-nameservice override + Configuration confOverrides = new Configuration(false); + confOverrides.set(RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_OVERRIDES, "ns0"); + startUpCluster(1, confOverrides); + + RouterContext routerContext = cluster.getRandomRouter(); + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile"); + fileSystem.create(path).close(); + fileSystem.open(path).close(); + fileSystem.close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // Create, complete and read calls should be sent to active + assertEquals("Three calls should be sent to active", 3, rpcCountForActive); + + long rpcCountForObserver = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getObserverProxyOps(); + assertEquals("Zero calls should be sent to observer", 0, rpcCountForObserver); + } + + @Test + public void testReadWhenObserverIsDown() throws Exception { + startUpCluster(1); + RouterContext routerContext = cluster.getRandomRouter(); + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile1"); + // Send Create call to active + 
fileSystem.create(path).close(); + + // Stop observer NN + int nnIndex = stopObserver(1); + + assertNotEquals("No observer found", 3, nnIndex); + + // Send read request + fileSystem.open(path).close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // Create, complete and getBlockLocations calls should be sent to active + assertEquals("Three calls should be sent to active", 3, + rpcCountForActive); + + long rpcCountForObserver = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getObserverProxyOps(); + assertEquals("No call should be sent to observer", 0, + rpcCountForObserver); + fileSystem.close(); + } + + @Test + public void testMultipleObserver() throws Exception { + startUpCluster(2); + RouterContext routerContext = cluster.getRandomRouter(); + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile1"); + // Send Create call to active + fileSystem.create(path).close(); + + // Stop one observer NN + stopObserver(1); + + // Send read request + fileSystem.open(path).close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + + long expectedActiveRpc = 2; + long expectedObserverRpc = 1; + + // Create and complete calls should be sent to active + assertEquals("Two calls should be sent to active", + expectedActiveRpc, rpcCountForActive); + + long rpcCountForObserver = routerContext.getRouter() + .getRpcServer().getRPCMetrics().getObserverProxyOps(); + // The getBlockLocations call should be sent to the remaining observer + assertEquals("Read should succeed with another observer", + expectedObserverRpc, rpcCountForObserver); + + // Stop one observer NN + stopObserver(1); + + // Send read request + fileSystem.open(path).close(); + + rpcCountForActive = routerContext.getRouter() + .getRpcServer().getRPCMetrics().getActiveProxyOps(); + + // The getBlockLocations call should be sent to the active + expectedActiveRpc += 1; + assertEquals("One call should be sent to active", expectedActiveRpc, + rpcCountForActive); + expectedObserverRpc += 0; + rpcCountForObserver = routerContext.getRouter() + .getRpcServer().getRPCMetrics().getObserverProxyOps(); + assertEquals("No call should be sent to observer", + expectedObserverRpc, rpcCountForObserver); + fileSystem.close(); + } + + private int stopObserver(int num) { + int nnIndex; + for (nnIndex = 0; nnIndex < cluster.getNamenodes().size(); nnIndex++) { + NameNode nameNode = cluster.getCluster().getNameNode(nnIndex); + if (nameNode != null && nameNode.isObserverState()) { + cluster.getCluster().shutdownNameNode(nnIndex); + num--; + if (num == 0) { + break; + } + } + } + return nnIndex; + } + + // Test router observer reads with multiple observers, checking which + // observer NN received the requests + @Test + public void testMultipleObserverRouter() throws Exception { + StateStoreDFSCluster innerCluster; + RouterContext routerContext; + MembershipNamenodeResolver resolver; + + String ns0; + String ns1; + // Create 4 NNs: one active, one standby, and two observers + innerCluster = new StateStoreDFSCluster(true, 4, 4, TimeUnit.SECONDS.toMillis(5), + TimeUnit.SECONDS.toMillis(5)); + Configuration routerConf = + new RouterConfigBuilder().stateStore().admin().rpc() + .enableLocalHeartbeat(true).heartbeat().build(); + + StringBuilder sb = new StringBuilder(); + ns0 = innerCluster.getNameservices().get(0); + MiniRouterDFSCluster.NamenodeContext context = + innerCluster.getNamenodes(ns0).get(1); + routerConf.set(DFS_NAMESERVICE_ID, ns0); + 
routerConf.set(DFS_HA_NAMENODE_ID_KEY, context.getNamenodeId()); + + // Specify namenodes (ns1.nn0,ns1.nn1) to monitor + ns1 = innerCluster.getNameservices().get(1); + for (MiniRouterDFSCluster.NamenodeContext ctx : innerCluster.getNamenodes(ns1)) { + String suffix = ctx.getConfSuffix(); + if (sb.length() != 0) { + sb.append(","); + } + sb.append(suffix); + } + routerConf.set(DFS_ROUTER_MONITOR_NAMENODE, sb.toString()); + routerConf.setBoolean(RBFConfigKeys.DFS_ROUTER_OBSERVER_READ_DEFAULT_KEY, true); + routerConf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true); + routerConf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "0ms"); + + innerCluster.addNamenodeOverrides(routerConf); + innerCluster.addRouterOverrides(routerConf); + innerCluster.startCluster(); + + if (innerCluster.isHighAvailability()) { + for (String ns : innerCluster.getNameservices()) { + innerCluster.switchToActive(ns, NAMENODES[0]); + innerCluster.switchToStandby(ns, NAMENODES[1]); + for (int i = 2; i < 4; i++) { + innerCluster.switchToObserver(ns, NAMENODES[i]); + } + } + } + innerCluster.startRouters(); + innerCluster.waitClusterUp(); + + routerContext = innerCluster.getRandomRouter(); + resolver = (MembershipNamenodeResolver) routerContext.getRouter() + .getNamenodeResolver(); + + resolver.loadCache(true); + List namespaceInfo0 = + resolver.getNamenodesForNameserviceId(ns0, true); + List namespaceInfo1 = + resolver.getNamenodesForNameserviceId(ns1, true); + assertEquals(namespaceInfo0.get(0).getState(), + FederationNamenodeServiceState.OBSERVER); + assertEquals(namespaceInfo0.get(1).getState(), + FederationNamenodeServiceState.OBSERVER); + assertNotEquals(namespaceInfo0.get(0).getNamenodeId(), + namespaceInfo0.get(1).getNamenodeId()); + assertEquals(namespaceInfo1.get(0).getState(), + FederationNamenodeServiceState.OBSERVER); + } + + @Test + public void testUnavailableObserverNN() throws Exception { + startUpCluster(2); + RouterContext routerContext = cluster.getRandomRouter(); + FileSystem fileSystem = routerContext.getFileSystem(); + + stopObserver(2); + + Path path = new Path("/testFile"); + // Send Create call to active + fileSystem.create(path).close(); + + // Send read request. + fileSystem.open(path).close(); + + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + + // Create, complete and getBlockLocations + // calls should be sent to active. + assertEquals("Three calls should be sent to active", + 3, rpcCountForActive); + + + boolean hasUnavailable = false; + for (String ns : cluster.getNameservices()) { + List nns = routerContext.getRouter() + .getNamenodeResolver().getNamenodesForNameserviceId(ns, false); + for (FederationNamenodeContext nn : nns) { + if (FederationNamenodeServiceState.UNAVAILABLE == nn.getState()) { + hasUnavailable = true; + } + } + } + // After attempting to communicate with the unavailable observer namenode, + // its state is updated to unavailable. 
+ assertTrue("There must be unavailable namenodes", hasUnavailable); + } + + @Test + public void testRouterMsync() throws Exception { + startUpCluster(1); + RouterContext routerContext = cluster.getRandomRouter(); + + FileSystem fileSystem = routerContext.getFileSystem(); + Path path = new Path("/testFile"); + + // Send Create call to active + fileSystem.create(path).close(); + long rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // Create and complete calls should be sent to active + assertEquals("Two calls should be sent to active", 2, + rpcCountForActive); + + // Send msync + fileSystem.msync(); + rpcCountForActive = routerContext.getRouter().getRpcServer() + .getRPCMetrics().getActiveProxyOps(); + // 2 msync calls should be sent. One to each active namenode in the two namespaces. + assertEquals("Four calls should be sent to active", 4, + rpcCountForActive); + fileSystem.close(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java index 94f2baeaed136..04b4b58bcb6e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java @@ -167,7 +167,7 @@ public void testHearbeat() throws InterruptedException, IOException { // Verify the locator has matching NN entries for each NS for (String ns : cluster.getNameservices()) { List nns = - namenodeResolver.getNamenodesForNameserviceId(ns); + namenodeResolver.getNamenodesForNameserviceId(ns, false); // Active FederationNamenodeContext active = nns.get(0); @@ -191,7 +191,7 @@ public void testHearbeat() throws InterruptedException, IOException { // Verify the locator has recorded the failover for the failover NS List failoverNSs = - namenodeResolver.getNamenodesForNameserviceId(failoverNS); + namenodeResolver.getNamenodesForNameserviceId(failoverNS, false); // Active FederationNamenodeContext active = failoverNSs.get(0); assertEquals(NAMENODES[1], active.getNamenodeId()); @@ -202,7 +202,7 @@ public void testHearbeat() throws InterruptedException, IOException { // Verify the locator has the same records for the other ns List normalNss = - namenodeResolver.getNamenodesForNameserviceId(normalNs); + namenodeResolver.getNamenodesForNameserviceId(normalNs, false); // Active active = normalNss.get(0); assertEquals(NAMENODES[0], active.getNamenodeId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 4fae86b01d399..bae2dea3ceabf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -204,7 +204,7 @@ public void testNamenodeMonitoring() throws Exception { final List namespaceInfo = new ArrayList<>(); for (String nsId : nns.keySet()) { List nnReports = - 
resolver.getNamenodesForNameserviceId(nsId); + resolver.getNamenodesForNameserviceId(nsId, false); namespaceInfo.addAll(nnReports); } for (FederationNamenodeContext nnInfo : namespaceInfo) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java index ab507aaf9ecd4..f23b02092a299 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java @@ -194,7 +194,7 @@ private void testWebScheme(HttpConfig.Policy httpPolicy, final List namespaceInfo = new ArrayList<>(); for (String nsId : nns.keySet()) { List nnReports = - resolver.getNamenodesForNameserviceId(nsId); + resolver.getNamenodesForNameserviceId(nsId, false); namespaceInfo.addAll(nnReports); } for (FederationNamenodeContext nnInfo : namespaceInfo) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java index b2bfb2f5121bb..1054e5ac8cf97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java @@ -166,7 +166,7 @@ public void testRetryWhenOneNameServiceDown() throws Exception { private void registerInvalidNameReport() throws IOException { String ns0 = cluster.getNameservices().get(0); List origin = resolver - .getNamenodesForNameserviceId(ns0); + .getNamenodesForNameserviceId(ns0, false); FederationNamenodeContext nnInfo = origin.get(0); NamenodeStatusReport report = new NamenodeStatusReport(ns0, nnInfo.getNamenodeId(), nnInfo.getRpcAddress(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 0e3a8dd091512..faf71a7b545d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -31,7 +31,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.hdfs.qjournal.server.NewerTxnIdException; import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -524,9 +523,6 @@ public void selectInputStreams(Collection streams, selectRpcInputStreams(rpcStreams, fromTxnId, onlyDurableTxns); streams.addAll(rpcStreams); return; - } catch (NewerTxnIdException ntie) { - // normal situation, we requested newer IDs than any journal has. 
no new streams - return; } catch (IOException ioe) { LOG.warn("Encountered exception while tailing edits >= " + fromTxnId + " via RPC; falling back to streaming.", ioe); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 7726377538aeb..ffa613018c6a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -751,8 +751,18 @@ public GetJournaledEditsResponseProto getJournaledEdits(long sinceTxId, "it via " + DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY); } long highestTxId = getHighestWrittenTxId(); - if (sinceTxId > highestTxId) { - // Requested edits that don't exist yet and is newer than highestTxId. + if (sinceTxId == highestTxId + 1) { + // Requested edits that don't exist yet, but this is expected, + // because the namenode always gets the journaled edits with the sinceTxId + // equal to image.getLastAppliedTxId() + 1. Short-circuit the cache here + // and return a response with a count of 0. + metrics.rpcEmptyResponses.incr(); + return GetJournaledEditsResponseProto.newBuilder().setTxnCount(0).build(); + } else if (sinceTxId > highestTxId + 1) { + // Requested edits that don't exist yet, and this is unexpected: it means + // this journal is lagging and is missing some edits that should exist. + // Throw a NewerTxnIdException so the namenode treats this response as an + // exception. For more details, refer to HDFS-16659 and HDFS-16771. + metrics.rpcEmptyResponses.incr(); throw new NewerTxnIdException( "Highest txn ID available in the journal is %d, but requested txns starting at %d.", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index ad67cf481ae70..ab909aef2ecd8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -114,6 +114,8 @@ public class JournalNodeRpcServer implements QJournalProtocol, .setVerbose(false) .build(); + this.server.addTerseExceptions(NewerTxnIdException.class); + this.server.addTerseExceptions(JournaledEditsCache.CacheMissException.class); //Adding InterQJournalProtocolPB to server InterQJournalProtocolServerSideTranslatorPB diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4df4b408b9e5c..dfe48f7bde1f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1067,6 +1067,26 @@ public void setBlocksReplWorkMultiplier(int newVal) { blocksReplWorkMultiplier = newVal; } + + /** + * Updates the value used for pendingReconstruction timeout, which is set by + * {@code DFSConfigKeys. + * DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY} initially. 
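+ * <p>Note: the new value is given in seconds; it is stored internally
+ * in milliseconds ({@code newVal * 1000L}).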
+ * + * @param newVal - Must be a positive non-zero integer. + */ + public void setReconstructionPendingTimeout(int newVal) { + ensurePositiveInt(newVal, + DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY); + pendingReconstruction.setTimeout(newVal * 1000L); + } + + /** Returns the current setting for pendingReconstruction timeout, set by + * {@code DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY}. + */ + public int getReconstructionPendingTimeout() { + return (int)(pendingReconstruction.getTimeout() / 1000L); + } + public int getDefaultStorageNum(BlockInfo block) { switch (block.getBlockType()) { case STRIPED: return ((BlockInfoStriped) block).getRealTotalBlockNum(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index a7a2e5488a0a3..b2c5cb0b557f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1327,8 +1327,8 @@ private void refreshHostsReader(Configuration conf) throws IOException { // Update the file names and refresh internal includes and excludes list. if (conf == null) { conf = new HdfsConfiguration(); - this.hostConfigManager.setConf(conf); } + this.hostConfigManager.setConf(conf); this.hostConfigManager.refresh(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java index 3e56606197bb4..6c3b4c97bed37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java @@ -76,6 +76,14 @@ void start() { timerThread.start(); } + public void setTimeout(long timeoutPeriod) { + this.timeout = timeoutPeriod; + } + + public long getTimeout() { + return this.timeout; + } + /** * Add a block to the list of pending reconstructions * @param block The corresponding block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 9e397b9114087..3d3b65d8e217b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -203,6 +203,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT; import static org.apache.hadoop.util.ExitUtil.terminate; import static 
org.apache.hadoop.util.ToolRunner.confirmPrompt; @@ -350,7 +352,8 @@ public enum OperationCategory { DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, DFS_BLOCK_INVALIDATE_LIMIT_KEY, DFS_DATANODE_PEER_STATS_ENABLED_KEY, - DFS_DATANODE_MAX_NODES_TO_REPORT_KEY)); + DFS_DATANODE_MAX_NODES_TO_REPORT_KEY, + DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)); private static final String USAGE = "Usage: hdfs namenode [" + StartupOption.BACKUP.getName() + "] | \n\t[" @@ -2301,7 +2304,8 @@ protected String reconfigurePropertyImpl(String property, String newVal) } else if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY) || property.equals(DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY) || property.equals( - DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)) { + DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION) + || property.equals(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) { return reconfReplicationParameters(newVal, property); } else if (property.equals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY) || property .equals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)) { @@ -2347,6 +2351,14 @@ private String reconfReplicationParameters(final String newVal, DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT, newVal)); newSetting = bm.getBlocksReplWorkMultiplier(); + } else if ( + property.equals( + DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) { + bm.setReconstructionPendingTimeout( + adjustNewVal( + DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT, + newVal)); + newSetting = bm.getReconstructionPendingTimeout(); } else { throw new IllegalArgumentException("Unexpected property " + property + " in reconfReplicationParameters"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index da19904cfbd01..bc317eb94dfce 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4277,14 +4277,18 @@ org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault Class representing block placement policy for non-striped files. - There are four block placement policies currently being supported: + There are six block placement policies currently being supported: BlockPlacementPolicyDefault, BlockPlacementPolicyWithNodeGroup, - BlockPlacementPolicyRackFaultTolerant and BlockPlacementPolicyWithUpgradeDomain. + BlockPlacementPolicyRackFaultTolerant, BlockPlacementPolicyWithUpgradeDomain, + AvailableSpaceBlockPlacementPolicy and AvailableSpaceRackFaultTolerantBlockPlacementPolicy. BlockPlacementPolicyDefault chooses the desired number of targets for placing block replicas in a default way. BlockPlacementPolicyWithNodeGroup places block replicas on environment with node-group layer. BlockPlacementPolicyRackFaultTolerant places the replicas to more racks. BlockPlacementPolicyWithUpgradeDomain places block replicas that honors upgrade domain policy. + AvailableSpaceBlockPlacementPolicy places block replicas based on space balanced policy. + AvailableSpaceRackFaultTolerantBlockPlacementPolicy places block replicas based on + space balanced rack fault tolerant policy. The details of placing replicas are documented in the javadoc of the corresponding policy classes. The default policy is BlockPlacementPolicyDefault, and the corresponding class is org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault. 
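(Editorial, hedged sketch: one way to select a non-default policy from the list above, using `DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY`, the programmatic form of `dfs.block.replicator.classname`; the wrapper class and method here are illustrative only.)

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;

public final class PlacementPolicyConfExample {
  // Builds a configuration that selects the space-balanced policy;
  // equivalent to setting dfs.block.replicator.classname in hdfs-site.xml.
  public static Configuration spaceBalancedPolicyConf() {
    Configuration conf = new HdfsConfiguration();
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        AvailableSpaceBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    return conf;
  }
}
```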
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md index b29a888475941..5201e0ee921eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md @@ -109,34 +109,34 @@ In the below mount table configuration, namespace `/data` is linked to the files ```xml - fs.viewfs.mounttable.ClusterX.link./data + fs.viewfs.mounttable.clusterX.link./data hdfs://nn1-clusterx.example.com:8020/data - fs.viewfs.mounttable.ClusterX.link./project + fs.viewfs.mounttable.clusterX.link./project hdfs://nn2-clusterx.example.com:8020/project - fs.viewfs.mounttable.ClusterX.link./user + fs.viewfs.mounttable.clusterX.link./user hdfs://nn3-clusterx.example.com:8020/user - fs.viewfs.mounttable.ClusterX.link./tmp + fs.viewfs.mounttable.clusterX.link./tmp hdfs://nn4-clusterx.example.com:8020/tmp - fs.viewfs.mounttable.ClusterX.linkFallback + fs.viewfs.mounttable.clusterX.linkFallback hdfs://nn5-clusterx.example.com:8020/home ``` -Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`. +Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, clusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`. ```xml - fs.viewfs.mounttable.ClusterY.linkMergeSlash + fs.viewfs.mounttable.clusterY.linkMergeSlash hdfs://nn1-clustery.example.com:8020/ @@ -443,7 +443,7 @@ The mount tables can be described in `core-site.xml` but it is better to use ind ``` -In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes +In the file `mountTable.xml`, there is a definition of the mount table "clusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes 1. nn1-clusterx.example.com:8020, 2. 
nn2-clusterx.example.com:8020, and @@ -454,23 +454,23 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex ```xml - fs.viewfs.mounttable.ClusterX.homedir + fs.viewfs.mounttable.clusterX.homedir /home - fs.viewfs.mounttable.ClusterX.link./home + fs.viewfs.mounttable.clusterX.link./home hdfs://nn1-clusterx.example.com:8020/home - fs.viewfs.mounttable.ClusterX.link./tmp + fs.viewfs.mounttable.clusterX.link./tmp hdfs://nn1-clusterx.example.com:8020/tmp - fs.viewfs.mounttable.ClusterX.link./projects/foo + fs.viewfs.mounttable.clusterX.link./projects/foo hdfs://nn2-clusterx.example.com:8020/projects/foo - fs.viewfs.mounttable.ClusterX.link./projects/bar + fs.viewfs.mounttable.clusterX.link./projects/bar hdfs://nn3-clusterx.example.com:8020/projects/bar diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index c4e99b1833a06..dd8bb2043828b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -2309,6 +2309,8 @@ public synchronized void restartNameNode(int nnIndex, boolean waitActive, nn.getHttpServer() .setAttribute(ImageServlet.RECENT_IMAGE_CHECK_ENABLED, false); info.nameNode = nn; + info.nameserviceId = info.conf.get(DFS_NAMESERVICE_ID); + info.nnId = info.conf.get(DFS_HA_NAMENODE_ID_KEY); info.setStartOpt(startOpt); if (waitActive) { if (numDataNodes > 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index de738eef177a3..9638f71ef8286 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -81,21 +81,21 @@ public void testDatanodeReportWithUpgradeDomain() throws Exception { datanode.setUpgradeDomain(ud1); hostsFileWriter.initIncludeHosts( new DatanodeAdminProperties[]{datanode}); - client.refreshNodes(); + cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf); DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL); assertEquals(all[0].getUpgradeDomain(), ud1); datanode.setUpgradeDomain(null); hostsFileWriter.initIncludeHosts( new DatanodeAdminProperties[]{datanode}); - client.refreshNodes(); + cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf); all = client.datanodeReport(DatanodeReportType.ALL); assertEquals(all[0].getUpgradeDomain(), null); datanode.setUpgradeDomain(ud2); hostsFileWriter.initIncludeHosts( new DatanodeAdminProperties[]{datanode}); - client.refreshNodes(); + cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf); all = client.datanodeReport(DatanodeReportType.ALL); assertEquals(all[0].getUpgradeDomain(), ud2); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index 84ce7c2572482..e2ee2e365d9ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -42,8 +42,10 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.net.MockDomainNameResolver;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture;
@@ -1263,4 +1265,39 @@ private void checkRecovery(MiniJournalCluster cluster,
         segmentTxId);
     }
   }
+
+  @Test
+  public void testSelectLatestEditsWithoutStreaming() throws Exception {
+    EditLogOutputStream stm = qjm.startLogSegment(
+        1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+    // Successfully write these edits to JN0 ~ JN2
+    writeTxns(stm, 1, 10);
+
+    AtomicInteger atomicInteger = new AtomicInteger(0);
+    spyGetEditLogManifest(0, 11, true, atomicInteger::incrementAndGet);
+    spyGetEditLogManifest(1, 11, true, atomicInteger::incrementAndGet);
+    spyGetEditLogManifest(2, 11, true, atomicInteger::incrementAndGet);
+
+    List<EditLogInputStream> streams = new ArrayList<>();
+    qjm.selectInputStreams(streams, 1, true, true);
+    assertEquals(1, streams.size());
+    assertEquals(1, streams.get(0).getFirstTxId());
+    assertEquals(10, streams.get(0).getLastTxId());
+
+    streams.clear();
+    qjm.selectInputStreams(streams, 11, true, true);
+    assertEquals(0, streams.size());
+    assertEquals(0, atomicInteger.get());
+  }
+
+  private void spyGetEditLogManifest(int jnSpyIdx, long fromTxId,
+      boolean inProgressOk, Runnable preHook) {
+    Mockito.doAnswer((Answer<ListenableFuture<RemoteEditLogManifest>>) invocation -> {
+      preHook.run();
+      @SuppressWarnings("unchecked")
+      ListenableFuture<RemoteEditLogManifest> result =
+          (ListenableFuture<RemoteEditLogManifest>) invocation.callRealMethod();
+      return result;
+    }).when(spies.get(jnSpyIdx)).getEditLogManifest(fromTxId, inProgressOk);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7bb288809dea9..420635e012ac7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -2310,7 +2310,7 @@ private void testUpgradeDomain(boolean defineUpgradeDomain,
       dnProp.setPort(datanodeID.getXferPort());
       dnProp.setUpgradeDomain(upgradeDomain);
       hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[]{dnProp});
-      cluster.getFileSystem().refreshNodes();
+      cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
     }
 
     // create files
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index e86413d8bf3dc..78ebcb4e2dfd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
@@ -175,4 +176,38 @@ public void testHostsIncludeForDeadCount() throws Exception {
       hostsFileWriter.cleanup();
     }
   }
+
+  @Test
+  public void testNewHostAndExcludeFile() throws Exception {
+    Configuration conf = getConf();
+
+    HostsFileWriter writer1 = new HostsFileWriter();
+    writer1.initialize(conf, "old_temp/decommission");
+    writer1.initIncludeHosts(new String[]{"localhost:52", "127.0.0.1:7777"});
+
+    // Write all hosts to a new dfs.hosts file.
+    HostsFileWriter writer2 = new HostsFileWriter();
+    Configuration newConf = new Configuration(getConf());
+    writer2.initialize(newConf, "new_temp/decommission");
+    writer2.initIncludeHosts(new String[]{
+        "localhost:52", "127.0.0.1:7777", "localhost:100"});
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+      assertEquals(2, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+
+      ns.getBlockManager().getDatanodeManager().refreshNodes(newConf);
+      assertEquals(3, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      writer1.cleanup();
+      writer2.cleanup();
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
index 8dc81f8c1a21d..8336a432b9a41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
@@ -49,6 +49,9 @@ public void setup() throws IOException {
     config.setInt(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
         12);
+    config.setInt(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+        300);
 
     cluster = new MiniDFSCluster.Builder(config)
         .nnTopology(MiniDFSNNTopology.simpleSingleNN(0, 0))
@@ -72,6 +75,7 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
     assertEquals(8, bm.getMaxReplicationStreams());
     assertEquals(10, bm.getReplicationStreamsHardLimit());
     assertEquals(12, bm.getBlocksReplWorkMultiplier());
+    assertEquals(300, bm.getReconstructionPendingTimeout());
 
     cluster.getNameNode().reconfigurePropertyImpl(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, "20");
@@ -81,10 +85,14 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
     cluster.getNameNode().reconfigurePropertyImpl(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
         "24");
+    cluster.getNameNode().reconfigurePropertyImpl(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+        "180");
 
     assertEquals(20, bm.getMaxReplicationStreams());
     assertEquals(22, bm.getReplicationStreamsHardLimit());
     assertEquals(24, bm.getBlocksReplWorkMultiplier());
+    assertEquals(180, bm.getReconstructionPendingTimeout());
   }
 
   /**
@@ -96,7 +104,8 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
     String[] keys = new String[]{
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY
     };
 
     // Ensure we cannot set any of the parameters negative
@@ -112,6 +121,7 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
       assertEquals(8, bm.getMaxReplicationStreams());
       assertEquals(10, bm.getReplicationStreamsHardLimit());
       assertEquals(12, bm.getBlocksReplWorkMultiplier());
+      assertEquals(300, bm.getReconstructionPendingTimeout());
 
     for (String key : keys) {
       ReconfigurationException e =
@@ -126,6 +136,7 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
       assertEquals(8, bm.getMaxReplicationStreams());
       assertEquals(10, bm.getReplicationStreamsHardLimit());
       assertEquals(12, bm.getBlocksReplWorkMultiplier());
+      assertEquals(300, bm.getReconstructionPendingTimeout());
 
     // Ensure none of the parameters can be set to a string value
     for (String key : keys) {
@@ -139,5 +150,6 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
       assertEquals(8, bm.getMaxReplicationStreams());
       assertEquals(10, bm.getReplicationStreamsHardLimit());
       assertEquals(12, bm.getBlocksReplWorkMultiplier());
+      assertEquals(300, bm.getReconstructionPendingTimeout());
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 0421941f3f4c5..abd26d4f5a4f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -69,11 +69,11 @@ public class TestUpgradeDomainBlockPlacementPolicy {
   static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
   private MiniDFSCluster cluster = null;
   private HostsFileWriter hostsFileWriter = new HostsFileWriter();
+  private Configuration conf = new HdfsConfiguration();
 
   @Before
   public void setup() throws IOException {
     StaticMapping.resetMap();
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
     conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -130,7 +130,7 @@ private void refreshDatanodeAdminProperties()
     datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
 
     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
@@ -169,7 +169,7 @@ private void refreshDatanodeAdminProperties2()
     datanodes[2].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[3].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
 
     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(0).getDatanodeId());
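For context, a minimal sketch (illustrative, not part of the patch) of the refresh idiom these tests switch to: DatanodeManager#refreshNodes(Configuration) re-reads the include/exclude host files from the Configuration it is handed, so a test can point a fresh Configuration at new hosts files and apply it directly, instead of going through DistributedFileSystem#refreshNodes(), which only re-reads the files the NameNode was started with.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Sketch only: assumes a running MiniDFSCluster whose first namesystem
// should pick up host-file settings from newConf.
public class RefreshNodesSketch {
  static void refreshWithNewHostsConfig(MiniDFSCluster cluster,
      Configuration newConf) throws Exception {
    cluster.getNamesystem(0).getBlockManager()
        .getDatanodeManager().refreshNodes(newConf);
  }
}
```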
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index a910117194507..60728284e59b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -21,6 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider.*;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
@@ -36,7 +37,10 @@
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,9 +65,12 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
 import org.apache.hadoop.hdfs.server.namenode.TestFsck;
 import org.apache.hadoop.hdfs.tools.GetGroups;
 import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
+import org.apache.hadoop.ipc.metrics.RpcMetrics;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.junit.After;
@@ -124,6 +131,43 @@ public static void shutDownCluster() throws IOException {
     }
   }
 
+  @Test
+  public void testObserverRequeue() throws Exception {
+    ScheduledExecutorService interruptor =
+        Executors.newScheduledThreadPool(1);
+
+    FSNamesystem observerFsNS = dfsCluster.getNamesystem(2);
+    RpcMetrics obRpcMetrics = ((NameNodeRpcServer)dfsCluster
+        .getNameNodeRpc(2)).getClientRpcServer().getRpcMetrics();
+    try {
+      // Stop EditlogTailer of Observer NameNode.
+      observerFsNS.getEditLogTailer().stop();
+      long oldRequeueNum = obRpcMetrics.getRpcRequeueCalls();
+      ScheduledFuture<FileStatus> scheduledFuture = interruptor.schedule(
+          () -> {
+            Path tmpTestPath = new Path("/TestObserverRequeue");
+            dfs.create(tmpTestPath, (short)1).close();
+            assertSentTo(0);
+            // This operation will be blocked in ObserverNameNode
+            // until EditlogTailer tailed edits from journalNode.
+            FileStatus fileStatus = dfs.getFileStatus(tmpTestPath);
+            assertSentTo(2);
+            return fileStatus;
+          }, 0, TimeUnit.SECONDS);
+
+      GenericTestUtils.waitFor(() -> obRpcMetrics.getRpcRequeueCalls() > oldRequeueNum,
+          50, 10000);
+
+      observerFsNS.getEditLogTailer().doTailEdits();
+      FileStatus fileStatus = scheduledFuture.get(10000, TimeUnit.MILLISECONDS);
+      assertNotNull(fileStatus);
+    } finally {
+      EditLogTailer editLogTailer = new EditLogTailer(observerFsNS, conf);
+      observerFsNS.setEditLogTailerForTests(editLogTailer);
+      editLogTailer.start();
+    }
+  }
+
   @Test
   public void testNoActiveToObserver() throws Exception {
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 3df873a51ceae..99e4b348f6157 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -438,7 +438,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(19, outs.size());
+    assertEquals(20, outs.size());
     assertTrue(outs.get(0).contains("Reconfigurable properties:"));
     assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
     assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -1266,4 +1266,4 @@ public void testAllDatanodesReconfig()
         outs.get(8));
   }
 
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index d09b3cb1e56b3..d184d9be64bf8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -379,27 +379,38 @@ class EventProcessor implements Runnable {
     @Override
     public void run() {
-      LOG.info("Processing the event " + event.toString());
+      LOG.info("Processing the event {}", event);
 
       // Load ContainerManager tokens before creating a connection.
       // TODO: Do it only once per NodeManager.
       ContainerId containerID = event.getContainerID();
 
-      Container c = getContainer(event);
       switch(event.getType()) {
 
       case CONTAINER_REMOTE_LAUNCH:
         ContainerRemoteLaunchEvent launchEvent
            = (ContainerRemoteLaunchEvent) event;
-        c.launch(launchEvent);
+        getContainer(event).launch(launchEvent);
         break;
 
       case CONTAINER_REMOTE_CLEANUP:
-        c.kill(event.getDumpContainerThreads());
+        // If the container failed to launch earlier (due to dead node for example),
+        // it has been marked as FAILED and removed from containers during
+        // CONTAINER_REMOTE_LAUNCH event handling.
+        // Skip kill() such container during CONTAINER_REMOTE_CLEANUP as
+        // it is not necessary and could cost 15 minutes delay if the node is dead.
+        if (!containers.containsKey(containerID)) {
+          LOG.info("Skip cleanup of already-removed container {}", containerID);
+          // send killed event to task attempt regardless like in kill().
+          context.getEventHandler().handle(new TaskAttemptEvent(event.getTaskAttemptID(),
+              TaskAttemptEventType.TA_CONTAINER_CLEANED));
+          return;
+        }
+        getContainer(event).kill(event.getDumpContainerThreads());
         break;
 
       case CONTAINER_COMPLETED:
-        c.done();
+        getContainer(event).done();
         break;
 
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
index 2057cc80ff01a..88ba8943ceb3f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
@@ -209,14 +209,11 @@ public void testHandle() throws Exception {
       ut.waitForPoolToIdle();
 
       verify(mockCM).startContainers(any(StartContainersRequest.class));
-
+
       LOG.info("inserting cleanup event");
-      ContainerLauncherEvent mockCleanupEvent =
-          mock(ContainerLauncherEvent.class);
-      when(mockCleanupEvent.getType())
-          .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
-      when(mockCleanupEvent.getContainerID())
-          .thenReturn(contId);
+      ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent.getContainerID()).thenReturn(contId);
       when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
       when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
       ut.handle(mockCleanupEvent);
@@ -283,8 +280,21 @@ public void testOutOfOrder() throws Exception {
       ut.handle(mockLaunchEvent);
 
       ut.waitForPoolToIdle();
-
-      verify(mockCM, never()).startContainers(any(StartContainersRequest.class));
+
+      verify(mockCM).startContainers(any(StartContainersRequest.class));
+
+      LOG.info("inserting cleanup event");
+      ContainerLauncherEvent mockCleanupEvent2 = mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent2.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent2.getContainerID()).thenReturn(contId);
+      when(mockCleanupEvent2.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockCleanupEvent2.getContainerMgrAddress()).thenReturn(cmAddress);
+      ut.handle(mockCleanupEvent2);
+
+      ut.waitForPoolToIdle();
+
+      // Verifies stopContainers is called on existing container
+      verify(mockCM).stopContainers(any(StopContainersRequest.class));
     } finally {
       ut.stop();
     }
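A minimal sketch of the cleanup guard added above (illustrative, not the MR classes: Container and notifyCleaned are hypothetical names): when a failed launch has already removed the container from the tracking map, cleanup skips the remote kill, which can block for roughly fifteen minutes against a dead node, but still reports the container as cleaned so the task attempt can finish its state transition.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class CleanupGuardSketch<ID> {
  interface Container { void kill(); }

  private final ConcurrentMap<ID, Container> containers = new ConcurrentHashMap<>();

  void cleanup(ID containerId, Runnable notifyCleaned) {
    Container c = containers.get(containerId);
    if (c == null) {
      // Already removed by a failed launch: no remote call, just notify.
      notifyCleaned.run();
      return;
    }
    c.kill();          // remote stop for a still-tracked container
    notifyCleaned.run();
  }
}
```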
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/BaseTestLineRecordReaderBZip2.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/BaseTestLineRecordReaderBZip2.java
index da6bedde13149..1e06f12656930 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/BaseTestLineRecordReaderBZip2.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/BaseTestLineRecordReaderBZip2.java
@@ -19,8 +19,10 @@
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.StringJoiner;
 
 import org.junit.After;
 import org.junit.Before;
@@ -34,6 +36,7 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.io.compress.bzip2.BZip2TextFileWriter.BLOCK_SIZE;
+import static org.apache.hadoop.util.Preconditions.checkArgument;
 import static org.junit.Assert.assertEquals;
 
 public abstract class BaseTestLineRecordReaderBZip2 {
@@ -306,6 +309,8 @@ private void assertRecordCountsPerSplit(
     countAssert.assertSingleSplit();
     countAssert.assertSplittingAtBlocks();
     countAssert.assertSplittingJustAfterSecondBlockStarts();
+    countAssert.assertSplittingEachBlockRangeInThreeParts();
+    countAssert.assertSplitsAroundBlockStartOffsets();
   }
 
   private class RecordCountAssert {
@@ -334,16 +339,7 @@ private void assertSingleSplit() throws IOException {
     }
 
     private void assertSplittingAtBlocks() throws IOException {
-      for (int i = 0; i < numBlocks; i++) {
-        long start = i == 0 ? 0 : nextBlockOffsets.get(i - 1);
-        long end = i == numBlocks - 1 ? fileSize : nextBlockOffsets.get(i);
-        long length = end - start;
-
-        String message = "At i=" + i;
-        long expectedCount = countsIfSplitAtBlocks[i];
-        assertEquals(
-            message, expectedCount, reader.countRecords(start, length));
-      }
+      assertSplits(getSplitsAtBlocks());
     }
 
     private void assertSplittingJustAfterSecondBlockStarts()
@@ -363,6 +359,123 @@ private void assertSplittingJustAfterSecondBlockStarts()
           remainingRecords,
           reader.countRecords(firstSplitSize, fileSize - firstSplitSize));
     }
+
+    private void assertSplittingEachBlockRangeInThreeParts()
+        throws IOException {
+      for (SplitRange splitRange : getSplitsAtBlocks()) {
+        long[] expectedNumRecordsPerPart = new long[] {
+            splitRange.expectedNumRecords, 0, 0
+        };
+        List<SplitRange> parts = splitRange.divide(expectedNumRecordsPerPart);
+        assertSplits(parts);
+      }
+    }
+
+    private void assertSplitsAroundBlockStartOffsets()
+        throws IOException {
+      for (SplitRange split : getSplitsAtBlocks()) {
+        assertSplit(split.withLength(1));
+        if (split.start > 0) {
+          assertSplit(split.moveBy(-2).withLength(3));
+          assertSplit(split.moveBy(-2).withLength(2).withExpectedNumRecords(0));
+          assertSplit(split.moveBy(-1).withLength(2));
+          assertSplit(split.moveBy(-1).withLength(1).withExpectedNumRecords(0));
+        }
+        assertSplit(split.moveBy(1).withLength(1).withExpectedNumRecords(0));
+        assertSplit(split.moveBy(2).withLength(1).withExpectedNumRecords(0));
+      }
+    }
+
+    private List<SplitRange> getSplitsAtBlocks() {
+      List<SplitRange> splits = new ArrayList<>();
+      for (int i = 0; i < numBlocks; i++) {
+        String name = "Block" + i;
+        long start = i == 0 ? 0 : nextBlockOffsets.get(i - 1);
+        long end = i == numBlocks - 1 ? fileSize : nextBlockOffsets.get(i);
+        long length = end - start;
+        long expectedNumRecords = countsIfSplitAtBlocks[i];
+        splits.add(new SplitRange(name, start, length, expectedNumRecords));
+      }
+      return splits;
+    }
+
+    private void assertSplits(Iterable<SplitRange> splitRanges)
+        throws IOException {
+      for (SplitRange splitRange : splitRanges) {
+        assertSplit(splitRange);
+      }
+    }
+
+    private void assertSplit(SplitRange splitRange) throws IOException {
+      String message = splitRange.toString();
+      long actual = reader.countRecords(splitRange.start, splitRange.length);
+      assertEquals(message, splitRange.expectedNumRecords, actual);
+    }
+  }
+
+  private static class SplitRange {
+    final private String name;
+    final private long start;
+    final private long length;
+    final private long expectedNumRecords;
+
+    SplitRange(
+        String name,
+        long start,
+        long length,
+        long expectedNumRecords) {
+      this.name = name;
+      this.start = start;
+      this.length = length;
+      this.expectedNumRecords = expectedNumRecords;
+    }
+
+    @Override
+    public String toString() {
+      return new StringJoiner(", ", SplitRange.class.getSimpleName() + "[", "]")
+          .add("name='" + name + "'")
+          .add("start=" + start)
+          .add("length=" + length)
+          .add("expectedNumRecords=" + expectedNumRecords)
+          .toString();
+    }
+
+    List<SplitRange> divide(long[] expectedNumRecordsPerPart) {
+      int numParts = expectedNumRecordsPerPart.length;
+      checkArgument(numParts > 0);
+
+      long minPartSize = length / numParts;
+      checkArgument(minPartSize > 0);
+      long lastPartExtraSize = length % numParts;
+
+      List<SplitRange> partRanges = new ArrayList<>();
+      long partStart = start;
+      for (int i = 0; i < numParts; i++) {
+        String partName = name + "_Part" + i;
+
+        long extraSize = i == numParts - 1 ? lastPartExtraSize : 0;
+        long partSize = minPartSize + extraSize;
+
+        long partExpectedNumRecords = expectedNumRecordsPerPart[i];
+
+        partRanges.add(new SplitRange(
+            partName, partStart, partSize, partExpectedNumRecords));
+        partStart += partSize;
+      }
+      return partRanges;
+    }
+
+    SplitRange withLength(long newLength) {
+      return new SplitRange(name, start, newLength, expectedNumRecords);
+    }
+
+    SplitRange withExpectedNumRecords(long newExpectedNumRecords) {
+      return new SplitRange(name, start, length, newExpectedNumRecords);
+    }
+
+    SplitRange moveBy(long delta) {
+      return new SplitRange(name, start + delta, length, expectedNumRecords);
+    }
   }
 
   private long getFileSize(Path path) throws IOException {
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6649fa574007e..6d5509b2fa68e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -124,9 +124,9 @@
     1.1
     3.6.1
     3.8.0
-    1.4
+    1.9
 
-    1.0.1
+    2.0.2
     1.0-alpha-1
     3.3.1
     4.0.3
@@ -185,7 +185,7 @@
     1.0-beta-1
     900
     1.12.262
-    2.3.4
+    2.5.2
     1.11.2
     2.1
     0.7
@@ -1206,7 +1206,7 @@
       <dependency>
         <groupId>org.apache.commons</groupId>
         <artifactId>commons-configuration2</artifactId>
-        <version>2.1.1</version>
+        <version>2.8.0</version>
       </dependency>
       <dependency>
         <groupId>org.apache.commons</groupId>
@@ -1782,6 +1782,12 @@
       <dependency>
         <groupId>org.apache.kerby</groupId>
         <artifactId>kerb-simplekdc</artifactId>
         <version>${kerby.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>org.jboss.xnio</groupId>
+            <artifactId>xnio-api</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.kerby</groupId>
diff --git a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
index 324369076b84b..5a2f172186238 100644
--- a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml
@@ -59,6 +59,10 @@
+
+
+
+
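The SplitRange helpers above follow an immutable "wither" idiom: each call returns a fresh value object, so boundary probes such as "one byte before a block start" compose without side effects. A small self-contained sketch of the idiom (illustrative, not part of the patch):

```java
final class RangeSketch {
  final long start;
  final long length;

  RangeSketch(long start, long length) {
    this.start = start;
    this.length = length;
  }

  // Each mutator returns a new instance; the original is never changed.
  RangeSketch moveBy(long delta) { return new RangeSketch(start + delta, length); }
  RangeSketch withLength(long newLength) { return new RangeSketch(start, newLength); }

  public static void main(String[] args) {
    RangeSketch block = new RangeSketch(4096, 1024);
    RangeSketch straddle = block.moveBy(-1).withLength(2); // crosses the block start
    System.out.println(straddle.start + "+" + straddle.length); // prints 4095+2
  }
}
```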
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 6d9085b51a078..6ebf1c71f0d5b 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -53,6 +53,9 @@
     false
+
+
+    <fs.s3a.prefetch.enabled>unset</fs.s3a.prefetch.enabled>
@@ -127,6 +130,8 @@
                 <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
                 <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
+
+                <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
@@ -166,6 +171,8 @@
                 <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
                 <test.integration.timeout>${test.integration.timeout}</test.integration.timeout>
+
+                <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
@@ -215,6 +222,8 @@
                 <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
                 <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
+
+                <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
@@ -271,6 +280,8 @@
                 <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
                 <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
+
+                <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
                 <fs.s3a.scale.test.timeout>${fs.s3a.scale.test.timeout}</fs.s3a.scale.test.timeout>
@@ -332,6 +343,19 @@
+
+    <profile>
+      <id>prefetch</id>
+      <activation>
+        <property>
+          <name>prefetch</name>
+        </property>
+      </activation>
+      <properties>
+        <fs.s3a.prefetch.enabled>true</fs.s3a.prefetch.enabled>
+      </properties>
+    </profile>
+
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index c187026450c90..f8a014c8933c0 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -633,17 +633,11 @@ public void initialize(URI name, Configuration originalConf)
       // amazon client exception: stop all services then throw the translation
       cleanupWithLogger(LOG, span);
       stopAllServices();
-      if (this.futurePool != null) {
-        this.futurePool = null;
-      }
       throw translateException("initializing ", new Path(name), e);
     } catch (IOException | RuntimeException e) {
       // other exceptions: stop the services.
       cleanupWithLogger(LOG, span);
       stopAllServices();
-      if (this.futurePool != null) {
-        this.futurePool = null;
-      }
       throw e;
     }
   }
@@ -4038,6 +4032,10 @@ protected synchronized void stopAllServices() {
     HadoopExecutors.shutdown(unboundedThreadPool, LOG,
         THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
     unboundedThreadPool = null;
+    if (futurePool != null) {
+      futurePool.shutdown(LOG, THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
+      futurePool = null;
+    }
 
     // other services are shutdown.
     cleanupWithLogger(LOG,
         instrumentation,
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index bbe1d0f808194..d2ed9ede0171c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -617,6 +617,20 @@ your `core-site.xml` file, so that trying to use S3 select fails fast with
 a meaningful error ("S3 Select not supported") rather than a generic Bad
 Request exception.
 
+### Enabling prefetch for all tests
+
+The tests are run with prefetch enabled if the `prefetch` property is set in the
+maven build. This can be combined with the scale tests as well.
+
+```bash
+mvn verify -Dprefetch
+
+mvn verify -Dparallel-tests -Dprefetch -DtestsThreadCount=8
+
+mvn verify -Dparallel-tests -Dprefetch -Dscale -DtestsThreadCount=8
+```
+
+
 ### Testing Requester Pays
 
 By default, the requester pays tests will look for a bucket that exists on Amazon S3
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 08ef7edf43b0a..469562f9b33b9 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -575,6 +575,10 @@ public static Configuration prepareTestConfiguration(final Configuration conf) {
         DEFAULT_DIRECTORY_MARKER_POLICY);
     conf.set(DIRECTORY_MARKER_POLICY, directoryRetention);
 
+    boolean prefetchEnabled =
+        getTestPropertyBool(conf, PREFETCH_ENABLED_KEY, PREFETCH_ENABLED_DEFAULT);
+    conf.setBoolean(PREFETCH_ENABLED_KEY, prefetchEnabled);
+
     return conf;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
index 8a0941698e6be..d61a10f998b3e 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
+++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
@@ -37,7 +37,7 @@ CREATE TABLE membership(
    state varchar(32) NOT NULL,
    lastStartTime bigint NULL,
    capability varchar(6000),
-   CONSTRAINT pk_subClusterId PRIMARY KEY (subClusterId)
+   CONSTRAINT pk_subClusterId PRIMARY KEY (subClusterId),
    UNIQUE(lastStartTime)
 );
 
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoredProcs.sql
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
rename to hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoredProcs.sql
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql
index 84f28acc17461..fb8a1bff554b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql
+++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql
@@ -77,7 +77,7 @@ IF NOT EXISTS ( SELECT * FROM [FederationStateStore].sys.tables
             CONSTRAINT [pk_subClusterId] PRIMARY KEY
             (
                 [subClusterId]
-            )
+            ),
             CONSTRAINT [uc_lastStartTime] UNIQUE
             (
                 [lastStartTime]
@@ -140,7 +140,7 @@ IF NOT EXISTS ( SELECT * FROM [FederationStateStore].sys.tables
         CREATE TABLE [dbo].[reservationsHomeSubCluster](
             reservationId VARCHAR(128) COLLATE Latin1_General_100_BIN2 NOT NULL,
             homeSubCluster VARCHAR(256) NOT NULL,
-            createTime DATETIME2 NOT NULL CONSTRAINT ts_createAppTime DEFAULT GETUTCDATE(),
+            createTime DATETIME2 NOT NULL CONSTRAINT ts_createResTime DEFAULT GETUTCDATE(),
 
             CONSTRAINT [pk_reservationId] PRIMARY KEY
             (
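A minimal sketch (not part of the patch) of what the `-Dprefetch` maven profile above ultimately toggles for the S3A test run, assuming `PREFETCH_ENABLED_KEY` is the constant for `fs.s3a.prefetch.enabled`; the same switch can be set programmatically:

```java
import org.apache.hadoop.conf.Configuration;

public class PrefetchToggleSketch {
  // Returns a copy of the given configuration with the S3A prefetching
  // input stream enabled.
  public static Configuration withPrefetch(Configuration base) {
    Configuration conf = new Configuration(base);
    conf.setBoolean("fs.s3a.prefetch.enabled", true);
    return conf;
  }
}
```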
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6715c92fe6b95..d5e120695e739 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1552,6 +1552,13 @@ public static boolean isAclEnabled(Configuration conf) {
   public static final long DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS
       = 10 * 60 * 1000;
 
+  /**
+   * Whether to clean up nodemanager logs when log aggregation is enabled.
+   */
+  public static final String LOG_AGGREGATION_ENABLE_LOCAL_CLEANUP =
+      YARN_PREFIX + "log-aggregation.enable-local-cleanup";
+  public static final boolean DEFAULT_LOG_AGGREGATION_ENABLE_LOCAL_CLEANUP = true;
+
   /**
    * Number of seconds to retain logs on the NodeManager. Only applicable if Log
    * aggregation is disabled
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index a04ff52be9749..86249f0a88c19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -254,7 +254,7 @@
             <exclude>src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
-            <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.13.1.custom.min.js</exclude>
+            <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.13.2.custom.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/jquery-3.6.0.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
             <exclude>src/test/resources/application_1440536969523_0001.har/_index</exclude>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index 66ea04c0bf797..8c8abc5a0bc6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -69,7 +69,7 @@ protected void render(Block html) {
           .link(root_url("static/dt-1.10.18/css/jui-dt.css"))
           .link(root_url("static/dt-1.10.18/css/custom_datatable.css"))
           .script(root_url("static/jquery/jquery-3.6.0.min.js"))
-          .script(root_url("static/jquery/jquery-ui-1.13.1.custom.min.js"))
+          .script(root_url("static/jquery/jquery-ui-1.13.2.custom.min.js"))
           .script(root_url("static/dt-1.10.18/js/jquery.dataTables.min.js"))
           .script(root_url("static/yarn.dt.plugins.js"))
           .script(root_url("static/dt-sorting/natural.js"))
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.13.1.custom.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.13.1.custom.min.js
deleted file mode 100644
index de4b5fc8d1a9b..0000000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery/jquery-ui-1.13.1.custom.min.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/*! jQuery UI - v1.13.1 - 2022-04-24
-* http://jqueryui.com
-* Includes: widget.js, position.js, data.js, disable-selection.js, focusable.js, form-reset-mixin.js, jquery-patch.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/draggable.js, widgets/droppable.js, widgets/resizable.js, widgets/selectable.js, widgets/sortable.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/selectmenu.js, widgets/slider.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js
-* Copyright jQuery Foundation and other contributors; Licensed MIT */
-
-[... minified jQuery UI 1.13.1 source (one long generated line) elided ...]
function(t){e._trigger("update",t,this._uiHash(this))}}.call(this,this.currentContainer)))),i=this.containers.length-1;0<=i;i--)e||s.push(n("deactivate",this,this.containers[i])),this.containers[i].containerCache.over&&(s.push(n("out",this,this.containers[i])),this.containers[i].containerCache.over=0);if(this.storedCursor&&(this.document.find("body").css("cursor",this.storedCursor),this.storedStylesheet.remove()),this._storedOpacity&&this.helper.css("opacity",this._storedOpacity),this._storedZIndex&&this.helper.css("zIndex","auto"===this._storedZIndex?"":this._storedZIndex),this.dragging=!1,e||this._trigger("beforeStop",t,this._uiHash()),this.placeholder[0].parentNode.removeChild(this.placeholder[0]),this.cancelHelperRemoval||(this.helper[0]!==this.currentItem[0]&&this.helper.remove(),this.helper=null),!e){for(i=0;i li > :first-child").add(t.find("> :not(li)").even())},heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var t=this.options;this.prevShow=this.prevHide=V(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),t.collapsible||!1!==t.active&&null!=t.active||(t.active=0),this._processPanels(),t.active<0&&(t.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():V()}},_createIcons:function(){var t,e=this.options.icons;e&&(t=V(""),this._addClass(t,"ui-accordion-header-icon","ui-icon "+e.header),t.prependTo(this.headers),t=this.active.children(".ui-accordion-header-icon"),this._removeClass(t,e.header)._addClass(t,null,e.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){"active"!==t?("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||!1!==this.options.active||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons())):this._activate(e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(t){if(!t.altKey&&!t.ctrlKey){var e=V.ui.keyCode,i=this.headers.length,s=this.headers.index(t.target),n=!1;switch(t.keyCode){case e.RIGHT:case e.DOWN:n=this.headers[(s+1)%i];break;case e.LEFT:case e.UP:n=this.headers[(s-1+i)%i];break;case e.SPACE:case e.ENTER:this._eventHandler(t);break;case e.HOME:n=this.headers[0];break;case 
e.END:n=this.headers[i-1]}n&&(V(t.target).attr("tabIndex",-1),V(n).attr("tabIndex",0),V(n).trigger("focus"),t.preventDefault())}},_panelKeyDown:function(t){t.keyCode===V.ui.keyCode.UP&&t.ctrlKey&&V(t.currentTarget).prev().trigger("focus")},refresh:function(){var t=this.options;this._processPanels(),!1===t.active&&!0===t.collapsible||!this.headers.length?(t.active=!1,this.active=V()):!1===t.active?this._activate(0):this.active.length&&!V.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(t.active=!1,this.active=V()):this._activate(Math.max(0,t.active-1)):t.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;"function"==typeof this.options.header?this.headers=this.options.header(this.element):this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var i,t=this.options,e=t.heightStyle,s=this.element.parent();this.active=this._findActive(t.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var t=V(this),e=t.uniqueId().attr("id"),i=t.next(),s=i.uniqueId().attr("id");t.attr("aria-controls",s),i.attr("aria-labelledby",e)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(t.event),"fill"===e?(i=s.height(),this.element.siblings(":visible").each(function(){var t=V(this),e=t.css("position");"absolute"!==e&&"fixed"!==e&&(i-=t.outerHeight(!0))}),this.headers.each(function(){i-=V(this).outerHeight(!0)}),this.headers.next().each(function(){V(this).height(Math.max(0,i-V(this).innerHeight()+V(this).height()))}).css("overflow","auto")):"auto"===e&&(i=0,this.headers.next().each(function(){var t=V(this).is(":visible");t||V(this).show(),i=Math.max(i,V(this).css("height","").height()),t||V(this).hide()}).height(i))},_activate:function(t){t=this._findActive(t)[0];t!==this.active[0]&&(t=t||this.active[0],this._eventHandler({target:t,currentTarget:t,preventDefault:V.noop}))},_findActive:function(t){return"number"==typeof t?this.headers.eq(t):V()},_setupEvents:function(t){var i={keydown:"_keydown"};t&&V.each(t.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(t){var 
e=this.options,i=this.active,s=V(t.currentTarget),n=s[0]===i[0],o=n&&e.collapsible,a=o?V():s.next(),r=i.next(),a={oldHeader:i,oldPanel:r,newHeader:o?V():s,newPanel:a};t.preventDefault(),n&&!e.collapsible||!1===this._trigger("beforeActivate",t,a)||(e.active=!o&&this.headers.index(s),this.active=n?V():s,this._toggle(a),this._removeClass(i,"ui-accordion-header-active","ui-state-active"),e.icons&&(i=i.children(".ui-accordion-header-icon"),this._removeClass(i,null,e.icons.activeHeader)._addClass(i,null,e.icons.header)),n||(this._removeClass(s,"ui-accordion-header-collapsed")._addClass(s,"ui-accordion-header-active","ui-state-active"),e.icons&&(n=s.children(".ui-accordion-header-icon"),this._removeClass(n,null,e.icons.header)._addClass(n,null,e.icons.activeHeader)),this._addClass(s.next(),"ui-accordion-content-active")))},_toggle:function(t){var e=t.newPanel,i=this.prevShow.length?this.prevShow:t.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=e,this.prevHide=i,this.options.animate?this._animate(e,i,t):(i.hide(),e.show(),this._toggleComplete(t)),i.attr({"aria-hidden":"true"}),i.prev().attr({"aria-selected":"false","aria-expanded":"false"}),e.length&&i.length?i.prev().attr({tabIndex:-1,"aria-expanded":"false"}):e.length&&this.headers.filter(function(){return 0===parseInt(V(this).attr("tabIndex"),10)}).attr("tabIndex",-1),e.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,i,e){var s,n,o,a=this,r=0,l=t.css("box-sizing"),h=t.length&&(!i.length||t.index()",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.lastMousePosition={x:null,y:null},this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault(),this._activateItem(t)},"click .ui-menu-item":function(t){var e=V(t.target),i=V(V.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&e.not(".ui-state-disabled").length&&(this.select(t),t.isPropagationStopped()||(this.mouseHandled=!0),e.has(".ui-menu").length?this.expand(t):!this.element.is(":focus")&&i.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":"_activateItem","mousemove .ui-menu-item":"_activateItem",mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this._menuItems().first();e||this.focus(t,i)},blur:function(t){this._delay(function(){V.contains(this.element[0],V.ui.safeActiveElement(this.document[0]))||this.collapseAll(t)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t,!0),this.mouseHandled=!1}})},_activateItem:function(t){var e,i;this.previousFilter||t.clientX===this.lastMousePosition.x&&t.clientY===this.lastMousePosition.y||(this.lastMousePosition={x:t.clientX,y:t.clientY},e=V(t.target).closest(".ui-menu-item"),i=V(t.currentTarget),e[0]===i[0]&&(i.is(".ui-state-active")||(this._removeClass(i.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(t,i))))},_destroy:function(){var t=this.element.find(".ui-menu-item").removeAttr("role 
aria-disabled").children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),t.children().each(function(){var t=V(this);t.data("ui-menu-submenu-caret")&&t.remove()})},_keydown:function(t){var e,i,s,n=!0;switch(t.keyCode){case V.ui.keyCode.PAGE_UP:this.previousPage(t);break;case V.ui.keyCode.PAGE_DOWN:this.nextPage(t);break;case V.ui.keyCode.HOME:this._move("first","first",t);break;case V.ui.keyCode.END:this._move("last","last",t);break;case V.ui.keyCode.UP:this.previous(t);break;case V.ui.keyCode.DOWN:this.next(t);break;case V.ui.keyCode.LEFT:this.collapse(t);break;case V.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(t);break;case V.ui.keyCode.ENTER:case V.ui.keyCode.SPACE:this._activate(t);break;case V.ui.keyCode.ESCAPE:this.collapse(t);break;default:e=this.previousFilter||"",s=n=!1,i=96<=t.keyCode&&t.keyCode<=105?(t.keyCode-96).toString():String.fromCharCode(t.keyCode),clearTimeout(this.filterTimer),i===e?s=!0:i=e+i,e=this._filterMenuItems(i),(e=s&&-1!==e.index(this.active.next())?this.active.nextAll(".ui-menu-item"):e).length||(i=String.fromCharCode(t.keyCode),e=this._filterMenuItems(i)),e.length?(this.focus(t,e),this.previousFilter=i,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}n&&t.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var t,e,s=this,n=this.options.icons.submenu,i=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),e=i.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var t=V(this),e=t.prev(),i=V("").data("ui-menu-submenu-caret",!0);s._addClass(i,"ui-menu-icon","ui-icon "+n),e.attr("aria-haspopup","true").prepend(i),t.attr("aria-labelledby",e.attr("id"))}),this._addClass(e,"ui-menu","ui-widget ui-widget-content ui-front"),(t=i.add(this.element).find(this.options.items)).not(".ui-menu-item").each(function(){var t=V(this);s._isDivider(t)&&s._addClass(t,"ui-menu-divider","ui-widget-content")}),i=(e=t.not(".ui-menu-item, .ui-menu-divider")).children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(e,"ui-menu-item")._addClass(i,"ui-menu-item-wrapper"),t.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!V.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){var i;"icons"===t&&(i=this.element.find(".ui-menu-icon"),this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)),this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",String(t)),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var 
i;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),i=this.active.children(".ui-menu-item-wrapper"),this._addClass(i,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",i.attr("id")),i=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(i,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),(i=e.children(".ui-menu")).length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(t){var e,i,s;this._hasScroll()&&(i=parseFloat(V.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(V.css(this.activeMenu[0],"paddingTop"))||0,e=t.offset().top-this.activeMenu.offset().top-i-s,i=this.activeMenu.scrollTop(),s=this.activeMenu.height(),t=t.outerHeight(),e<0?this.activeMenu.scrollTop(i+e):s",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,liveRegionTimer:null,_create:function(){var i,s,n,t=this.element[0].nodeName.toLowerCase(),e="textarea"===t,t="input"===t;this.isMultiLine=e||!t&&this._isContentEditable(this.element),this.valueMethod=this.element[e||t?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(t){if(this.element.prop("readOnly"))s=n=i=!0;else{s=n=i=!1;var e=V.ui.keyCode;switch(t.keyCode){case e.PAGE_UP:i=!0,this._move("previousPage",t);break;case e.PAGE_DOWN:i=!0,this._move("nextPage",t);break;case e.UP:i=!0,this._keyEvent("previous",t);break;case e.DOWN:i=!0,this._keyEvent("next",t);break;case e.ENTER:this.menu.active&&(i=!0,t.preventDefault(),this.menu.select(t));break;case e.TAB:this.menu.active&&this.menu.select(t);break;case e.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(t),t.preventDefault());break;default:s=!0,this._searchTimeout(t)}}},keypress:function(t){if(i)return i=!1,void(this.isMultiLine&&!this.menu.element.is(":visible")||t.preventDefault());if(!s){var e=V.ui.keyCode;switch(t.keyCode){case e.PAGE_UP:this._move("previousPage",t);break;case e.PAGE_DOWN:this._move("nextPage",t);break;case e.UP:this._keyEvent("previous",t);break;case e.DOWN:this._keyEvent("next",t)}}},input:function(t){if(n)return n=!1,void t.preventDefault();this._searchTimeout(t)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){clearTimeout(this.searching),this.close(t),this._change(t)}}),this._initSource(),this.menu=V("