From 094cfd228dd026a60592538a2be003b86ad65275 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Thu, 6 Nov 2014 10:03:26 +0530
Subject: [PATCH 001/432] HDFS-7347. Configurable erasure coding policy for
 individual files and directories ( Contributed by Zhe Zhang )

---
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt      |  4 +
 .../hadoop/hdfs/protocol/HdfsConstants.java   |  2 +
 .../BlockStoragePolicySuite.java              |  5 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java   | 12 ++-
 .../TestBlockInitialEncoding.java             | 75 +++++++++++++++++++
 5 files changed, 95 insertions(+), 3 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 0000000000000..2ef8527334aae
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
+
+    HDFS-7347. Configurable erasure coding policy for individual files and
+    directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 54da8ebe8d4a9..8b3dbd042ef95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -170,6 +170,7 @@ public static enum DatanodeReportType {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -177,5 +178,6 @@ public static enum DatanodeReportType {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index ce87b06fc7298..c81dc5b135369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -77,6 +77,11 @@ public static BlockStoragePolicySuite createDefaultSuite() {
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
+    final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
+    policies[ecId] = new BlockStoragePolicy(ecId,
+        HdfsConstants.EC_STORAGE_POLICY_NAME,
+        new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
+        new StorageType[]{StorageType.ARCHIVE});
     final byte coldId =
HdfsConstants.COLD_STORAGE_POLICY_ID; policies[coldId] = new BlockStoragePolicy(coldId, HdfsConstants.COLD_STORAGE_POLICY_NAME, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 3d417e65ee023..b8810470e4195 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -67,6 +67,7 @@ public class TestBlockStoragePolicy { static final short REPLICATION = 3; static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID; + static final byte EC = HdfsConstants.EC_STORAGE_POLICY_ID; static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID; static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID; static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID; @@ -114,6 +115,9 @@ public void testDefaultPolicies() { expectedPolicyStrings.put(COLD, "BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " + "creationFallbacks=[], replicationFallbacks=[]}"); + expectedPolicyStrings.put(EC, + "BlockStoragePolicy{EC:" + EC + ", storageTypes=[DISK], " + + "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}"); expectedPolicyStrings.put(WARM, "BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " + "creationFallbacks=[DISK, ARCHIVE], " + @@ -1156,13 +1160,15 @@ public void testGetAllStoragePolicies() throws Exception { final DistributedFileSystem fs = cluster.getFileSystem(); try { BlockStoragePolicy[] policies = fs.getStoragePolicies(); - Assert.assertEquals(6, policies.length); + Assert.assertEquals(7, policies.length); Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(), policies[0].toString()); - Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(), + Assert.assertEquals(POLICY_SUITE.getPolicy(EC).toString(), policies[1].toString()); - Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(), + Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(), policies[2].toString()); + Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(), + policies[3].toString()); } finally { IOUtils.cleanup(null, fs); cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java new file mode 100644 index 0000000000000..a84f67b232b0c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.*; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.EC_STORAGE_POLICY_NAME; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.EC_STORAGE_POLICY_ID; +import static org.junit.Assert.assertEquals; + +public class TestBlockInitialEncoding { + private final int NUM_OF_DATANODES = 3; + private Configuration conf; + private MiniDFSCluster cluster; + private DistributedFileSystem fs; + private static final int BLOCK_SIZE = 1024; + private HdfsAdmin dfsAdmin; + private FSNamesystem namesystem; + + @Before + public void setupCluster() throws IOException { + conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + cluster = new MiniDFSCluster.Builder(conf). + numDataNodes(NUM_OF_DATANODES).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); + namesystem = cluster.getNamesystem(); + } + + @After + public void shutdownCluster() throws IOException { + cluster.shutdown(); + } + + @Test + public void testBlockInitialEncoding() + throws IOException, InterruptedException { + final Path testDir = new Path("/test"); + fs.mkdir(testDir, FsPermission.getDirDefault()); + dfsAdmin.setStoragePolicy(testDir, EC_STORAGE_POLICY_NAME); + final Path ECFilePath = new Path("/test/foo.ec"); + DFSTestUtil.createFile(fs, ECFilePath, 4 * BLOCK_SIZE, (short) 3, 0); + INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString()); + assertEquals(EC_STORAGE_POLICY_ID, inode.getStoragePolicyID()); + } + +} \ No newline at end of file From 3ff144dbc0188a32b2883be5baaadc27f422b859 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Mon, 8 Dec 2014 15:57:52 +0530 Subject: [PATCH 002/432] MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about ssl-client.xml. Contributed by Yangping Wu. (harsh) --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../src/site/apt/EncryptedShuffle.apt.vm | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 3f34acd679b8e..c757d40e53815 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -246,6 +246,9 @@ Release 2.7.0 - UNRELEASED BUG FIXES + MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about + ssl-client.xml (Yangping Wu via harsh) + MAPREDUCE-5918. 
LineRecordReader can return the same decompressor to CodecPool multiple times (Sergey Murylev via raviprak) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm index da412df7877d0..68e569ddeac7a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm @@ -202,7 +202,7 @@ Hadoop MapReduce Next Generation - Encrypted Shuffle ** <<>> (Reducer/Fetcher) Configuration: - The mapred user should own the <> file and it should have + The mapred user should own the <> file and it should have default permissions. *---------------------------------------------+---------------------+-----------------+ From 9816c59f3ad85548f5ff4f7fca15ee4f562984a4 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 8 Dec 2014 15:30:34 +0000 Subject: [PATCH 003/432] HADOOP-10530 Make hadoop build on Java7+ only (stevel) --- BUILDING.txt | 4 ++-- hadoop-assemblies/pom.xml | 4 ++-- .../hadoop-annotations/pom.xml | 17 ----------------- .../hadoop-common/CHANGES.txt | 2 ++ hadoop-project/pom.xml | 19 +++++++++++++++---- pom.xml | 2 +- 6 files changed, 22 insertions(+), 26 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 06bef1fc33048..94cbe5ea14834 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -4,7 +4,7 @@ Build instructions for Hadoop Requirements: * Unix System -* JDK 1.6+ +* JDK 1.7+ * Maven 3.0 or later * Findbugs 1.3.9 (if running findbugs) * ProtocolBuffer 2.5.0 @@ -204,7 +204,7 @@ Building on Windows Requirements: * Windows System -* JDK 1.6+ +* JDK 1.7+ * Maven 3.0 or later * Findbugs 1.3.9 (if running findbugs) * ProtocolBuffer 2.5.0 diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index 66b6bdb16bd14..b53baccaad516 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -45,10 +45,10 @@ - [3.0.0,) + ${enforced.maven.version} - 1.6 + ${enforced.java.version} diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index 84a106e665325..c011b4581ea30 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -39,23 +39,6 @@ - - os.linux - - - !Mac - - - - - jdk.tools - jdk.tools - 1.6 - system - ${java.home}/../lib/tools.jar - - - jdk1.7 diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index a6263887f0487..616842fa5ff89 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -368,6 +368,8 @@ Release 2.7.0 - UNRELEASED INCOMPATIBLE CHANGES + HADOOP-10530 Make hadoop build on Java7+ only (stevel) + NEW FEATURES HADOOP-10987. 
Provide an iterator-based listing API for FileSystem (kihwal) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index d3c404e59f5bf..3b52dc3bb190e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -73,6 +73,17 @@ 3.4.6 6.0.41 + + + 1.7 + + + + [${javac.version},) + [3.0.2,) @@ -922,8 +933,8 @@ maven-compiler-plugin 2.5.1 - 1.6 - 1.6 + ${javac.version} + ${javac.version} @@ -1182,8 +1193,8 @@ maven-compiler-plugin true - 1.6 - 1.6 + ${javac.version} + ${javac.version} 9999 diff --git a/pom.xml b/pom.xml index 5cc30c24be1b3..de51a65f66650 100644 --- a/pom.xml +++ b/pom.xml @@ -124,7 +124,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs [3.0.2,) - 1.6 + [1.7,) From 3696f9d1a1216c3c384d65fb7289983225f6f771 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Mon, 8 Dec 2014 10:23:09 -0800 Subject: [PATCH 004/432] HDFS-7384. getfacl command and getAclStatus output should be in sync. Contributed by Vinayakumar B. --- .../apache/hadoop/fs/permission/AclEntry.java | 4 +- .../hadoop/fs/permission/AclStatus.java | 79 ++++++++++++++++++- .../apache/hadoop/fs/shell/AclCommands.java | 32 ++++---- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocolPB/PBHelper.java | 19 +++-- .../hdfs/server/namenode/FSDirAclOp.java | 4 +- .../offlineImageViewer/FSImageLoader.java | 31 +++----- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 17 +++- .../hadoop-hdfs/src/main/proto/acl.proto | 1 + .../hadoop-hdfs/src/site/apt/WebHDFS.apt.vm | 1 + .../hdfs/server/namenode/FSAclBaseTest.java | 46 +++++++++++ .../src/test/resources/testAclCLI.xml | 53 +++++++++++++ 12 files changed, 246 insertions(+), 44 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java index b65b7a0b438b2..b9def6447a870 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java @@ -146,7 +146,9 @@ public Builder setType(AclEntryType type) { * @return Builder this builder, for call chaining */ public Builder setName(String name) { - this.name = name; + if (name != null && !name.isEmpty()) { + this.name = name; + } return this; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java index 4a7258f0a2738..9d7500a697b1b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Objects; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; /** @@ -36,6 +37,7 @@ public class AclStatus { private final String group; private final boolean stickyBit; private final List entries; + private final FsPermission permission; /** * Returns the file owner. 
@@ -73,6 +75,14 @@ public List getEntries() { return entries; } + /** + * Returns the permission set for the path + * @return {@link FsPermission} for the path + */ + public FsPermission getPermission() { + return permission; + } + @Override public boolean equals(Object o) { if (o == null) { @@ -113,6 +123,7 @@ public static class Builder { private String group; private boolean stickyBit; private List entries = Lists.newArrayList(); + private FsPermission permission = null; /** * Sets the file owner. @@ -172,13 +183,22 @@ public Builder stickyBit(boolean stickyBit) { return this; } + /** + * Sets the permission for the file. + * @param permission + */ + public Builder setPermission(FsPermission permission) { + this.permission = permission; + return this; + } + /** * Builds a new AclStatus populated with the set properties. * * @return AclStatus new AclStatus */ public AclStatus build() { - return new AclStatus(owner, group, stickyBit, entries); + return new AclStatus(owner, group, stickyBit, entries, permission); } } @@ -190,12 +210,67 @@ public AclStatus build() { * @param group String file group * @param stickyBit the sticky bit * @param entries the ACL entries + * @param permission permission of the path */ private AclStatus(String owner, String group, boolean stickyBit, - Iterable entries) { + Iterable entries, FsPermission permission) { this.owner = owner; this.group = group; this.stickyBit = stickyBit; this.entries = Lists.newArrayList(entries); + this.permission = permission; + } + + /** + * Get the effective permission for the AclEntry + * @param entry AclEntry to get the effective action + */ + public FsAction getEffectivePermission(AclEntry entry) { + return getEffectivePermission(entry, permission); + } + + /** + * Get the effective permission for the AclEntry.
+ * Recommended to use this API ONLY if client communicates with the old + * NameNode, needs to pass the Permission for the path to get effective + * permission, else use {@link AclStatus#getEffectivePermission(AclEntry)}. + * @param entry AclEntry to get the effective action + * @param permArg Permission for the path. However if the client is NOT + * communicating with old namenode, then this argument will not have + * any preference. + * @return Returns the effective permission for the entry. + * @throws IllegalArgumentException If the client communicating with old + * namenode and permission is not passed as an argument. + */ + public FsAction getEffectivePermission(AclEntry entry, FsPermission permArg) + throws IllegalArgumentException { + // At least one permission bits should be available. + Preconditions.checkArgument(this.permission != null || permArg != null, + "Permission bits are not available to calculate effective permission"); + if (this.permission != null) { + // permission bits from server response will have the priority for + // accuracy. + permArg = this.permission; + } + if ((entry.getName() != null || entry.getType() == AclEntryType.GROUP)) { + if (entry.getScope() == AclEntryScope.ACCESS) { + FsAction entryPerm = entry.getPermission(); + return entryPerm.and(permArg.getGroupAction()); + } else { + Preconditions.checkArgument(this.entries.contains(entry) + && this.entries.size() >= 3, + "Passed default ACL entry not found in the list of ACLs"); + // default mask entry for effective permission calculation will be the + // penultimate entry. This can be mask entry in case of extended ACLs. + // In case of minimal ACL, this is the owner group entry, and we end up + // intersecting group FsAction with itself, which is a no-op. + FsAction defaultMask = this.entries.get(this.entries.size() - 2) + .getPermission(); + FsAction entryPerm = entry.getPermission(); + return entryPerm.and(defaultMask); + } + } else { + return entry.getPermission(); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java index 206576cfa547c..d139ebadce3bc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java @@ -86,22 +86,26 @@ protected void processPath(PathData item) throws IOException { (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T")); } - List entries = perm.getAclBit() ? - item.fs.getAclStatus(item.path).getEntries() : - Collections.emptyList(); + AclStatus aclStatus = item.fs.getAclStatus(item.path); + List entries = perm.getAclBit() ? aclStatus.getEntries() + : Collections. emptyList(); ScopedAclEntries scopedEntries = new ScopedAclEntries( AclUtil.getAclFromPermAndEntries(perm, entries)); - printAclEntriesForSingleScope(scopedEntries.getAccessEntries()); - printAclEntriesForSingleScope(scopedEntries.getDefaultEntries()); + printAclEntriesForSingleScope(aclStatus, perm, + scopedEntries.getAccessEntries()); + printAclEntriesForSingleScope(aclStatus, perm, + scopedEntries.getDefaultEntries()); out.println(); } /** * Prints all the ACL entries in a single scope. 
- * + * @param aclStatus AclStatus for the path + * @param fsPerm FsPermission for the path * @param entries List containing ACL entries of file */ - private void printAclEntriesForSingleScope(List entries) { + private void printAclEntriesForSingleScope(AclStatus aclStatus, + FsPermission fsPerm, List entries) { if (entries.isEmpty()) { return; } @@ -110,10 +114,8 @@ private void printAclEntriesForSingleScope(List entries) { out.println(entry); } } else { - // ACL sort order guarantees mask is the second-to-last entry. - FsAction maskPerm = entries.get(entries.size() - 2).getPermission(); for (AclEntry entry: entries) { - printExtendedAclEntry(entry, maskPerm); + printExtendedAclEntry(aclStatus, fsPerm, entry); } } } @@ -123,14 +125,16 @@ private void printAclEntriesForSingleScope(List entries) { * permissions of the entry, then also prints the restricted version as the * effective permissions. The mask applies to all named entries and also * the unnamed group entry. - * + * @param aclStatus AclStatus for the path + * @param fsPerm FsPermission for the path * @param entry AclEntry extended ACL entry to print - * @param maskPerm FsAction permissions in the ACL's mask entry */ - private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) { + private void printExtendedAclEntry(AclStatus aclStatus, + FsPermission fsPerm, AclEntry entry) { if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) { FsAction entryPerm = entry.getPermission(); - FsAction effectivePerm = entryPerm.and(maskPerm); + FsAction effectivePerm = aclStatus + .getEffectivePermission(entry, fsPerm); if (entryPerm != effectivePerm) { out.println(String.format("%s\t#effective:%s", entry, effectivePerm.SYMBOL)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 769be433084a8..7fcc8d24715fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -441,6 +441,9 @@ Release 2.7.0 - UNRELEASED HDFS-7476. Consolidate ACL-related operations to a single class. (wheat9 via cnauroth) + HDFS-7384. 'getfacl' command and 'getAclStatus' output should be in sync. + (Vinayakumar B via cnauroth) + OPTIMIZATIONS HDFS-7454. Reduce memory footprint for AclEntries in NameNode. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 2a5edc8dc9443..5a3658573c1da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -2278,15 +2278,24 @@ public static List convertAclEntry(List aclSpec) { public static AclStatus convert(GetAclStatusResponseProto e) { AclStatusProto r = e.getResult(); - return new AclStatus.Builder().owner(r.getOwner()).group(r.getGroup()) - .stickyBit(r.getSticky()) - .addEntries(convertAclEntry(r.getEntriesList())).build(); + AclStatus.Builder builder = new AclStatus.Builder(); + builder.owner(r.getOwner()).group(r.getGroup()).stickyBit(r.getSticky()) + .addEntries(convertAclEntry(r.getEntriesList())); + if (r.hasPermission()) { + builder.setPermission(convert(r.getPermission())); + } + return builder.build(); } public static GetAclStatusResponseProto convert(AclStatus e) { - AclStatusProto r = AclStatusProto.newBuilder().setOwner(e.getOwner()) + AclStatusProto.Builder builder = AclStatusProto.newBuilder(); + builder.setOwner(e.getOwner()) .setGroup(e.getGroup()).setSticky(e.isStickyBit()) - .addAllEntries(convertAclEntryProto(e.getEntries())).build(); + .addAllEntries(convertAclEntryProto(e.getEntries())); + if (e.getPermission() != null) { + builder.setPermission(convert(e.getPermission())); + } + AclStatusProto r = builder.build(); return GetAclStatusResponseProto.newBuilder().setResult(r).build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java index ac899aab362ae..c2dee207c32b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java @@ -171,9 +171,11 @@ static AclStatus getAclStatus( INode inode = FSDirectory.resolveLastINode(srcs, iip); int snapshotId = iip.getPathSnapshotId(); List acl = AclStorage.readINodeAcl(inode, snapshotId); + FsPermission fsPermission = inode.getFsPermission(snapshotId); return new AclStatus.Builder() .owner(inode.getUserName()).group(inode.getGroupName()) - .stickyBit(inode.getFsPermission(snapshotId).getStickyBit()) + .stickyBit(fsPermission.getStickyBit()) + .setPermission(fsPermission) .addEntries(acl).build(); } finally { fsd.readUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index ff665e7798d15..a26f1bf6cd7a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -34,10 +34,12 @@ import com.google.common.collect.ImmutableList; import com.google.protobuf.CodedInputStream; import com.google.protobuf.InvalidProtocolBufferException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; @@ -46,6 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageUtil; import org.apache.hadoop.hdfs.server.namenode.FsImageProto; import org.apache.hadoop.hdfs.server.namenode.INodeId; +import org.apache.hadoop.hdfs.web.JsonUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.LimitInputStream; import org.codehaus.jackson.map.ObjectMapper; @@ -314,27 +317,15 @@ private List> getFileStatusList(String path) * @throws IOException if failed to serialize fileStatus to JSON. */ String getAclStatus(String path) throws IOException { - StringBuilder sb = new StringBuilder(); - List aclEntryList = getAclEntryList(path); PermissionStatus p = getPermissionStatus(path); - sb.append("{\"AclStatus\":{\"entries\":["); - int i = 0; - for (AclEntry aclEntry : aclEntryList) { - if (i++ != 0) { - sb.append(','); - } - sb.append('"'); - sb.append(aclEntry.toString()); - sb.append('"'); - } - sb.append("],\"group\": \""); - sb.append(p.getGroupName()); - sb.append("\",\"owner\": \""); - sb.append(p.getUserName()); - sb.append("\",\"stickyBit\": "); - sb.append(p.getPermission().getStickyBit()); - sb.append("}}\n"); - return sb.toString(); + List aclEntryList = getAclEntryList(path); + FsPermission permission = p.getPermission(); + AclStatus.Builder builder = new AclStatus.Builder(); + builder.owner(p.getUserName()).group(p.getGroupName()) + .addEntries(aclEntryList).setPermission(permission) + .stickyBit(permission.getStickyBit()); + AclStatus aclStatus = builder.build(); + return JsonUtil.toJsonString(aclStatus); } private List getAclEntryList(String path) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 0a2ae2652dbf2..aa6100cc318e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -655,6 +655,16 @@ public static String toJsonString(final AclStatus status) { m.put("group", status.getGroup()); m.put("stickyBit", status.isStickyBit()); m.put("entries", status.getEntries()); + FsPermission perm = status.getPermission(); + if (perm != null) { + m.put("permission", toString(perm)); + if (perm.getAclBit()) { + m.put("aclBit", true); + } + if (perm.getEncryptedBit()) { + m.put("encBit", true); + } + } final Map> finalMap = new TreeMap>(); finalMap.put(AclStatus.class.getSimpleName(), m); @@ -673,7 +683,12 @@ public static AclStatus toAclStatus(final Map json) { aclStatusBuilder.owner((String) m.get("owner")); aclStatusBuilder.group((String) m.get("group")); aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit")); - + String permString = (String) m.get("permission"); + if (permString != null) { + final FsPermission permission = toFsPermission(permString, + (Boolean) m.get("aclBit"), (Boolean) m.get("encBit")); + aclStatusBuilder.setPermission(permission); + } final Object[] entries = (Object[]) m.get("entries"); List aclEntryList = new ArrayList(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto index e940142e339a7..57cc855786720 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto @@ -58,6 +58,7 @@ message AclStatusProto { required string group = 2; required bool sticky = 3; repeated AclEntryProto entries = 4; + optional FsPermissionProto permission = 5; } message AclEditLogProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm index 54cd2ed0a4bac..662f8b81236a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm @@ -919,6 +919,7 @@ Transfer-Encoding: chunked ], "group": "supergroup", "owner": "hadoop", + "permission":"775", "stickyBit": false } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index aff133f270d89..eda0a28580c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -1317,6 +1317,52 @@ public void testAccess() throws IOException, InterruptedException { } } + @Test + public void testEffectiveAccess() throws Exception { + Path p1 = new Path("/testEffectiveAccess"); + fs.mkdirs(p1); + // give all access at first + fs.setPermission(p1, FsPermission.valueOf("-rwxrwxrwx")); + AclStatus aclStatus = fs.getAclStatus(p1); + assertEquals("Entries should be empty", 0, aclStatus.getEntries().size()); + assertEquals("Permission should be carried by AclStatus", + fs.getFileStatus(p1).getPermission(), aclStatus.getPermission()); + + // Add a named entries with all access + fs.modifyAclEntries(p1, Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, GROUP, "groupY", ALL))); + aclStatus = fs.getAclStatus(p1); + assertEquals("Entries should contain owner group entry also", 3, aclStatus + .getEntries().size()); + + // restrict the access + fs.setPermission(p1, FsPermission.valueOf("-rwxr-----")); + // latest permissions should be reflected as effective permission + aclStatus = fs.getAclStatus(p1); + List entries = aclStatus.getEntries(); + for (AclEntry aclEntry : entries) { + if (aclEntry.getName() != null || aclEntry.getType() == GROUP) { + assertEquals(FsAction.ALL, aclEntry.getPermission()); + assertEquals(FsAction.READ, aclStatus.getEffectivePermission(aclEntry)); + } + } + fsAsBruce.access(p1, READ); + try { + fsAsBruce.access(p1, WRITE); + fail("Access should not be given"); + } catch (AccessControlException e) { + // expected + } + fsAsBob.access(p1, READ); + try { + fsAsBob.access(p1, WRITE); + fail("Access should not be given"); + } catch (AccessControlException e) { + // expected + } + } + /** * Creates a FileSystem for the super-user. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml index 21031ad2d2039..82a580926a1a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml @@ -317,6 +317,59 @@ + + setfacl : Add minimal default ACL + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m default:user::rwx /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + group::r-x + + + SubstringComparator + other::r-x + + + SubstringComparator + default:user::rwx + + + SubstringComparator + default:group::r-x + + + SubstringComparator + default:other::r-x + + + RegexpAcrossOutputComparator + .*(?!default\:mask)* + + + setfacl : try adding default ACL to file From 78efdb1889f729a47598f4354732b7d6203e35ba Mon Sep 17 00:00:00 2001 From: cnauroth Date: Mon, 8 Dec 2014 11:04:29 -0800 Subject: [PATCH 005/432] HDFS-7473. Document setting dfs.namenode.fs-limits.max-directory-items to 0 is invalid. Contributed by Akira AJISAKA. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/server/namenode/FSDirectory.java | 2 +- .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7fcc8d24715fb..fabb98f63604a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -550,6 +550,9 @@ Release 2.7.0 - UNRELEASED HDFS-7472. Fix typo in message of ReplicaNotFoundException. (Masatake Iwasaki via wheat9) + HDFS-7473. Document setting dfs.namenode.fs-limits.max-directory-items to 0 + is invalid. (Akira AJISAKA via cnauroth) + Release 2.6.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 82741ce0aa9ac..aee79afa1afe7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -242,7 +242,7 @@ public int getWriteHoldCount() { Preconditions.checkArgument( maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set " + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY - + " to a value less than 0 or greater than " + MAX_DIR_ITEMS); + + " to a value less than 1 or greater than " + MAX_DIR_ITEMS); int threshold = conf.getInt( DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 06d7ba81de468..55a876e0ab5a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -314,7 +314,8 @@ dfs.namenode.fs-limits.max-directory-items 1048576 Defines the maximum number of items that a directory may - contain. A value of 0 will disable the check. + contain. 
Cannot set the property to a value less than 1 or more than + 6400000. From 395f877b85b7903ea5583c47f59906b75dc63784 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Mon, 8 Dec 2014 11:08:17 -0800 Subject: [PATCH 006/432] HADOOP-11354. ThrottledInputStream doesn't perform effective throttling. Contributed by Ted Yu. --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/tools/util/ThrottledInputStream.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 616842fa5ff89..d4962766bed52 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -516,6 +516,9 @@ Release 2.7.0 - UNRELEASED HADOOP-11343. Overflow is not properly handled in caclulating final iv for AES CTR. (Jerry Chen via wang) + HADOOP-11354. ThrottledInputStream doesn't perform effective throttling. + (Ted Yu via jing9) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java index f6fe11847a25b..d08a3012e27aa 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java @@ -115,7 +115,7 @@ public int read(long position, byte[] buffer, int offset, int length) } private void throttle() throws IOException { - if (getBytesPerSec() > maxBytesPerSec) { + while (getBytesPerSec() > maxBytesPerSec) { try { Thread.sleep(SLEEP_DURATION_MS); totalSleepTime += SLEEP_DURATION_MS; From a27c9583230f2fd928873abcecf9ee75960120b9 Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Mon, 8 Dec 2014 11:52:21 -0800 Subject: [PATCH 007/432] HDFS-7486. Consolidate XAttr-related implementation into a single class. Contributed by Haohui Mai. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../namenode/EncryptionZoneManager.java | 3 +- .../hdfs/server/namenode/FSDirXAttrOp.java | 460 ++++++++++++++++++ .../hdfs/server/namenode/FSDirectory.java | 295 ++--------- .../hdfs/server/namenode/FSEditLogLoader.java | 19 +- .../hdfs/server/namenode/FSNamesystem.java | 227 +-------- .../hdfs/server/namenode/TestFSDirectory.java | 47 +- 7 files changed, 554 insertions(+), 500 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fabb98f63604a..55026a2679cc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -444,6 +444,9 @@ Release 2.7.0 - UNRELEASED HDFS-7384. 'getfacl' command and 'getAclStatus' output should be in sync. (Vinayakumar B via cnauroth) + HDFS-7486. Consolidate XAttr-related implementation into a single class. + (wheat9) + OPTIMIZATIONS HDFS-7454. Reduce memory footprint for AclEntries in NameNode. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 135979f50dc81..faab1f0b08a17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -311,7 +311,8 @@ XAttr createEncryptionZone(String src, CipherSuite suite, xattrs.add(ezXAttr); // updating the xattr will call addEncryptionZone, // done this way to handle edit log loading - dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); + FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs, + EnumSet.of(XAttrSetFlag.CREATE)); return ezXAttr; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java new file mode 100644 index 0000000000000..303b9e3a12ef3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -0,0 +1,460 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.XAttrHelper; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.security.AccessControlException; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.List; +import java.util.ListIterator; + +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; + +class FSDirXAttrOp { + private static final XAttr KEYID_XATTR = + XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); + private static final XAttr UNREADABLE_BY_SUPERUSER_XATTR = + XAttrHelper.buildXAttr(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER, null); + + /** + * Set xattr for a file or directory. 
+ * + * @param src + * - path on which it sets the xattr + * @param xAttr + * - xAttr details to set + * @param flag + * - xAttrs flags + * @throws IOException + */ + static HdfsFileStatus setXAttr( + FSDirectory fsd, String src, XAttr xAttr, EnumSet flag, + boolean logRetryCache) + throws IOException { + checkXAttrsConfigFlag(fsd); + checkXAttrSize(fsd, xAttr); + FSPermissionChecker pc = fsd.getPermissionChecker(); + XAttrPermissionFilter.checkPermissionForApi( + pc, xAttr, FSDirectory.isReservedRawName(src)); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath( + src); + src = fsd.resolvePath(pc, src, pathComponents); + final INodesInPath iip = fsd.getINodesInPath4Write(src); + checkXAttrChangeAccess(fsd, iip, xAttr, pc); + List xAttrs = Lists.newArrayListWithCapacity(1); + xAttrs.add(xAttr); + fsd.writeLock(); + try { + unprotectedSetXAttrs(fsd, src, xAttrs, flag); + } finally { + fsd.writeUnlock(); + } + fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); + return fsd.getAuditFileInfo(src, false); + } + + static List getXAttrs(FSDirectory fsd, final String srcArg, + List xAttrs) + throws IOException { + String src = srcArg; + checkXAttrsConfigFlag(fsd); + FSPermissionChecker pc = fsd.getPermissionChecker(); + final boolean isRawPath = FSDirectory.isReservedRawName(src); + boolean getAll = xAttrs == null || xAttrs.isEmpty(); + if (!getAll) { + XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath); + } + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + src = fsd.resolvePath(pc, src, pathComponents); + final INodesInPath iip = fsd.getINodesInPath(src, true); + if (fsd.isPermissionEnabled()) { + fsd.checkPathAccess(pc, iip, FsAction.READ); + } + List all = FSDirXAttrOp.getXAttrs(fsd, src); + List filteredAll = XAttrPermissionFilter. + filterXAttrsForApi(pc, all, isRawPath); + + if (getAll) { + return filteredAll; + } + if (filteredAll == null || filteredAll.isEmpty()) { + return null; + } + List toGet = Lists.newArrayListWithCapacity(xAttrs.size()); + for (XAttr xAttr : xAttrs) { + boolean foundIt = false; + for (XAttr a : filteredAll) { + if (xAttr.getNameSpace() == a.getNameSpace() && xAttr.getName().equals( + a.getName())) { + toGet.add(a); + foundIt = true; + break; + } + } + if (!foundIt) { + throw new IOException( + "At least one of the attributes provided was not found."); + } + } + return toGet; + } + + static List listXAttrs( + FSDirectory fsd, String src) throws IOException { + FSDirXAttrOp.checkXAttrsConfigFlag(fsd); + final FSPermissionChecker pc = fsd.getPermissionChecker(); + final boolean isRawPath = FSDirectory.isReservedRawName(src); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + src = fsd.resolvePath(pc, src, pathComponents); + final INodesInPath iip = fsd.getINodesInPath(src, true); + if (fsd.isPermissionEnabled()) { + /* To access xattr names, you need EXECUTE in the owning directory. */ + fsd.checkParentAccess(pc, iip, FsAction.EXECUTE); + } + final List all = FSDirXAttrOp.getXAttrs(fsd, src); + return XAttrPermissionFilter. + filterXAttrsForApi(pc, all, isRawPath); + } + + /** + * Remove an xattr for a file or directory. 
+ * + * @param src + * - path to remove the xattr from + * @param xAttr + * - xAttr to remove + * @throws IOException + */ + static HdfsFileStatus removeXAttr( + FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache) + throws IOException { + FSDirXAttrOp.checkXAttrsConfigFlag(fsd); + FSPermissionChecker pc = fsd.getPermissionChecker(); + XAttrPermissionFilter.checkPermissionForApi( + pc, xAttr, FSDirectory.isReservedRawName(src)); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath( + src); + src = fsd.resolvePath(pc, src, pathComponents); + final INodesInPath iip = fsd.getINodesInPath4Write(src); + checkXAttrChangeAccess(fsd, iip, xAttr, pc); + + List xAttrs = Lists.newArrayListWithCapacity(1); + xAttrs.add(xAttr); + fsd.writeLock(); + try { + List removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs); + if (removedXAttrs != null && !removedXAttrs.isEmpty()) { + fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache); + } else { + throw new IOException( + "No matching attributes found for remove operation"); + } + } finally { + fsd.writeUnlock(); + } + return fsd.getAuditFileInfo(src, false); + } + + static List unprotectedRemoveXAttrs( + FSDirectory fsd, final String src, final List toRemove) + throws IOException { + assert fsd.hasWriteLock(); + INodesInPath iip = fsd.getINodesInPath4Write( + FSDirectory.normalizePath(src), true); + INode inode = FSDirectory.resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); + List removedXAttrs = Lists.newArrayListWithCapacity(toRemove.size()); + List newXAttrs = filterINodeXAttrs(existingXAttrs, toRemove, + removedXAttrs); + if (existingXAttrs.size() != newXAttrs.size()) { + XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); + return removedXAttrs; + } + return null; + } + + /** + * Filter XAttrs from a list of existing XAttrs. Removes matched XAttrs from + * toFilter and puts them into filtered. Upon completion, + * toFilter contains the filter XAttrs that were not found, while + * fitleredXAttrs contains the XAttrs that were found. 
+ * + * @param existingXAttrs Existing XAttrs to be filtered + * @param toFilter XAttrs to filter from the existing XAttrs + * @param filtered Return parameter, XAttrs that were filtered + * @return List of XAttrs that does not contain filtered XAttrs + */ + @VisibleForTesting + static List filterINodeXAttrs( + final List existingXAttrs, final List toFilter, + final List filtered) + throws AccessControlException { + if (existingXAttrs == null || existingXAttrs.isEmpty() || + toFilter == null || toFilter.isEmpty()) { + return existingXAttrs; + } + + // Populate a new list with XAttrs that pass the filter + List newXAttrs = + Lists.newArrayListWithCapacity(existingXAttrs.size()); + for (XAttr a : existingXAttrs) { + boolean add = true; + for (ListIterator it = toFilter.listIterator(); it.hasNext() + ;) { + XAttr filter = it.next(); + Preconditions.checkArgument( + !KEYID_XATTR.equalsIgnoreValue(filter), + "The encryption zone xattr should never be deleted."); + if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) { + throw new AccessControlException("The xattr '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted."); + } + if (a.equalsIgnoreValue(filter)) { + add = false; + it.remove(); + filtered.add(filter); + break; + } + } + if (add) { + newXAttrs.add(a); + } + } + + return newXAttrs; + } + + static INode unprotectedSetXAttrs( + FSDirectory fsd, final String src, final List xAttrs, + final EnumSet flag) + throws IOException { + assert fsd.hasWriteLock(); + INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true); + INode inode = FSDirectory.resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); + List newXAttrs = setINodeXAttrs(fsd, existingXAttrs, xAttrs, flag); + final boolean isFile = inode.isFile(); + + for (XAttr xattr : newXAttrs) { + final String xaName = XAttrHelper.getPrefixName(xattr); + + /* + * If we're adding the encryption zone xattr, then add src to the list + * of encryption zones. + */ + if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { + final HdfsProtos.ZoneEncryptionInfoProto ezProto = + HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue()); + fsd.ezManager.addEncryptionZone(inode.getId(), + PBHelper.convert(ezProto.getSuite()), + PBHelper.convert( + ezProto.getCryptoProtocolVersion()), + ezProto.getKeyName()); + } + + if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) { + throw new IOException("Can only set '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file."); + } + } + + XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); + return inode; + } + + static List setINodeXAttrs( + FSDirectory fsd, final List existingXAttrs, + final List toSet, final EnumSet flag) + throws IOException { + // Check for duplicate XAttrs in toSet + // We need to use a custom comparator, so using a HashSet is not suitable + for (int i = 0; i < toSet.size(); i++) { + for (int j = i + 1; j < toSet.size(); j++) { + if (toSet.get(i).equalsIgnoreValue(toSet.get(j))) { + throw new IOException("Cannot specify the same XAttr to be set " + + "more than once"); + } + } + } + + // Count the current number of user-visible XAttrs for limit checking + int userVisibleXAttrsNum = 0; // Number of user visible xAttrs + + // The XAttr list is copied to an exactly-sized array when it's stored, + // so there's no need to size it precisely here. + int newSize = (existingXAttrs != null) ? 
existingXAttrs.size() : 0; + newSize += toSet.size(); + List xAttrs = Lists.newArrayListWithCapacity(newSize); + + // Check if the XAttr already exists to validate with the provided flag + for (XAttr xAttr: toSet) { + boolean exist = false; + if (existingXAttrs != null) { + for (XAttr a : existingXAttrs) { + if (a.equalsIgnoreValue(xAttr)) { + exist = true; + break; + } + } + } + XAttrSetFlag.validate(xAttr.getName(), exist, flag); + // add the new XAttr since it passed validation + xAttrs.add(xAttr); + if (isUserVisible(xAttr)) { + userVisibleXAttrsNum++; + } + } + + // Add the existing xattrs back in, if they weren't already set + if (existingXAttrs != null) { + for (XAttr existing : existingXAttrs) { + boolean alreadySet = false; + for (XAttr set : toSet) { + if (set.equalsIgnoreValue(existing)) { + alreadySet = true; + break; + } + } + if (!alreadySet) { + xAttrs.add(existing); + if (isUserVisible(existing)) { + userVisibleXAttrsNum++; + } + } + } + } + + if (userVisibleXAttrsNum > fsd.getInodeXAttrsLimit()) { + throw new IOException("Cannot add additional XAttr to inode, " + + "would exceed limit of " + fsd.getInodeXAttrsLimit()); + } + + return xAttrs; + } + + static List getXAttrs(FSDirectory fsd, INode inode, int snapshotId) + throws IOException { + fsd.readLock(); + try { + return XAttrStorage.readINodeXAttrs(inode, snapshotId); + } finally { + fsd.readUnlock(); + } + } + + static XAttr unprotectedGetXAttrByName( + INode inode, int snapshotId, String xAttrName) + throws IOException { + List xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId); + if (xAttrs == null) { + return null; + } + for (XAttr x : xAttrs) { + if (XAttrHelper.getPrefixName(x) + .equals(xAttrName)) { + return x; + } + } + return null; + } + + private static void checkXAttrChangeAccess( + FSDirectory fsd, INodesInPath iip, XAttr xAttr, + FSPermissionChecker pc) + throws AccessControlException { + if (fsd.isPermissionEnabled() && xAttr.getNameSpace() == XAttr.NameSpace + .USER) { + final INode inode = iip.getLastINode(); + if (inode != null && + inode.isDirectory() && + inode.getFsPermission().getStickyBit()) { + if (!pc.isSuperUser()) { + fsd.checkOwner(pc, iip); + } + } else { + fsd.checkPathAccess(pc, iip, FsAction.WRITE); + } + } + } + + /** + * Verifies that the combined size of the name and value of an xattr is within + * the configured limit. Setting a limit of zero disables this check. + */ + private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) { + if (fsd.getXattrMaxSize() == 0) { + return; + } + int size = xAttr.getName().getBytes(Charsets.UTF_8).length; + if (xAttr.getValue() != null) { + size += xAttr.getValue().length; + } + if (size > fsd.getXattrMaxSize()) { + throw new HadoopIllegalArgumentException( + "The XAttr is too big. The maximum combined size of the" + + " name and value is " + fsd.getXattrMaxSize() + + ", but the total size is " + size); + } + } + + private static void checkXAttrsConfigFlag(FSDirectory fsd) throws + IOException { + if (!fsd.isXattrsEnabled()) { + throw new IOException(String.format( + "The XAttr operation has been rejected. 
" + + "Support for XAttrs has been disabled by setting %s to false.", + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY)); + } + } + + private static List getXAttrs(FSDirectory fsd, + String src) throws IOException { + String srcs = FSDirectory.normalizePath(src); + fsd.readLock(); + try { + INodesInPath iip = fsd.getLastINodeInPath(srcs, true); + INode inode = FSDirectory.resolveLastINode(src, iip); + int snapshotId = iip.getPathSnapshotId(); + return XAttrStorage.readINodeXAttrs(inode, snapshotId); + } finally { + fsd.readUnlock(); + } + } + + private static boolean isUserVisible(XAttr xAttr) { + XAttr.NameSpace ns = xAttr.getNameSpace(); + return ns == XAttr.NameSpace.USER || ns == XAttr.NameSpace.TRUSTED; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index aee79afa1afe7..e8026274b7402 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.util.Time.now; import java.io.Closeable; @@ -30,7 +29,6 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.List; -import java.util.ListIterator; import java.util.concurrent.locks.ReentrantReadWriteLock; import com.google.protobuf.InvalidProtocolBufferException; @@ -122,10 +120,6 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { public final static String DOT_INODES_STRING = ".inodes"; public final static byte[] DOT_INODES = DFSUtil.string2Bytes(DOT_INODES_STRING); - private final XAttr KEYID_XATTR = - XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); - private final XAttr UNREADABLE_BY_SUPERUSER_XATTR = - XAttrHelper.buildXAttr(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER, null); INodeDirectory rootDir; private final FSNamesystem namesystem; @@ -136,6 +130,7 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { private final int contentCountLimit; // max content summary counts per run private final INodeMap inodeMap; // Synchronized by dirLock private long yieldCount = 0; // keep track of lock yield count. + private final int inodeXAttrsLimit; //inode xattrs max limit // lock to protect the directory and BlockMap @@ -148,6 +143,8 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { * ACL-related operations. */ private final boolean aclsEnabled; + private final boolean xattrsEnabled; + private final int xattrMaxSize; private final String fsOwnerShortUserName; private final String supergroup; private final INodeId inodeId; @@ -213,6 +210,18 @@ public int getWriteHoldCount() { DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT); LOG.info("ACLs enabled? " + aclsEnabled); + this.xattrsEnabled = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT); + LOG.info("XAttrs enabled? 
" + xattrsEnabled); + this.xattrMaxSize = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); + Preconditions.checkArgument(xattrMaxSize >= 0, + "Cannot set a negative value for the maximum size of an xattr (%s).", + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY); + final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : ""; + LOG.info("Maximum size of an xattr: " + xattrMaxSize + unlimited); int configuredLimit = conf.getInt( DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT); this.lsLimit = configuredLimit>0 ? @@ -274,6 +283,10 @@ boolean isPermissionEnabled() { boolean isAclsEnabled() { return aclsEnabled; } + boolean isXattrsEnabled() { + return xattrsEnabled; + } + int getXattrMaxSize() { return xattrMaxSize; } int getLsLimit() { return lsLimit; @@ -283,6 +296,10 @@ int getContentCountLimit() { return contentCountLimit; } + int getInodeXAttrsLimit() { + return inodeXAttrsLimit; + } + FSEditLog getEditLog() { return editLog; } @@ -613,8 +630,11 @@ private void setDirStoragePolicy(INodeDirectory inode, byte policyId, int latestSnapshotId) throws IOException { List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); XAttr xAttr = BlockStoragePolicySuite.buildXAttr(policyId); - List newXAttrs = setINodeXAttrs(existingXAttrs, Arrays.asList(xAttr), - EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + List newXAttrs = FSDirXAttrOp.setINodeXAttrs(this, existingXAttrs, + Arrays.asList(xAttr), + EnumSet.of( + XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId); } @@ -1560,90 +1580,6 @@ INodeSymlink unprotectedAddSymlink(long id, String path, String target, return addINode(path, symlink) ? symlink : null; } - /** - * Removes a list of XAttrs from an inode at a path. - * - * @param src path of inode - * @param toRemove XAttrs to be removed - * @return List of XAttrs that were removed - * @throws IOException if the inode does not exist, if quota is exceeded - */ - List removeXAttrs(final String src, final List toRemove) - throws IOException { - writeLock(); - try { - return unprotectedRemoveXAttrs(src, toRemove); - } finally { - writeUnlock(); - } - } - - List unprotectedRemoveXAttrs(final String src, - final List toRemove) throws IOException { - assert hasWriteLock(); - INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); - INode inode = resolveLastINode(src, iip); - int snapshotId = iip.getLatestSnapshotId(); - List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); - List removedXAttrs = Lists.newArrayListWithCapacity(toRemove.size()); - List newXAttrs = filterINodeXAttrs(existingXAttrs, toRemove, - removedXAttrs); - if (existingXAttrs.size() != newXAttrs.size()) { - XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); - return removedXAttrs; - } - return null; - } - - /** - * Filter XAttrs from a list of existing XAttrs. Removes matched XAttrs from - * toFilter and puts them into filtered. Upon completion, - * toFilter contains the filter XAttrs that were not found, while - * fitleredXAttrs contains the XAttrs that were found. 
- * - * @param existingXAttrs Existing XAttrs to be filtered - * @param toFilter XAttrs to filter from the existing XAttrs - * @param filtered Return parameter, XAttrs that were filtered - * @return List of XAttrs that does not contain filtered XAttrs - */ - @VisibleForTesting - List filterINodeXAttrs(final List existingXAttrs, - final List toFilter, final List filtered) - throws AccessControlException { - if (existingXAttrs == null || existingXAttrs.isEmpty() || - toFilter == null || toFilter.isEmpty()) { - return existingXAttrs; - } - - // Populate a new list with XAttrs that pass the filter - List newXAttrs = - Lists.newArrayListWithCapacity(existingXAttrs.size()); - for (XAttr a : existingXAttrs) { - boolean add = true; - for (ListIterator it = toFilter.listIterator(); it.hasNext() - ;) { - XAttr filter = it.next(); - Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter), - "The encryption zone xattr should never be deleted."); - if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) { - throw new AccessControlException("The xattr '" + - SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted."); - } - if (a.equalsIgnoreValue(filter)) { - add = false; - it.remove(); - filtered.add(filter); - break; - } - } - if (add) { - newXAttrs.add(a); - } - } - - return newXAttrs; - } - boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, SnapshotAccessControlException { readLock(); @@ -1709,7 +1645,8 @@ void setFileEncryptionInfo(String src, FileEncryptionInfo info) writeLock(); try { - unprotectedSetXAttrs(src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE)); + FSDirXAttrOp.unprotectedSetXAttrs(this, src, xAttrs, + EnumSet.of(XAttrSetFlag.CREATE)); } finally { writeUnlock(); } @@ -1752,8 +1689,9 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId, final CipherSuite suite = encryptionZone.getSuite(); final String keyName = encryptionZone.getKeyName(); - XAttr fileXAttr = unprotectedGetXAttrByName(inode, snapshotId, - CRYPTO_XATTR_FILE_ENCRYPTION_INFO); + XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByName(inode, + snapshotId, + CRYPTO_XATTR_FILE_ENCRYPTION_INFO); if (fileXAttr == null) { NameNode.LOG.warn("Could not find encryption XAttr for file " + @@ -1775,173 +1713,6 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId, } } - void setXAttrs(final String src, final List xAttrs, - final EnumSet flag) throws IOException { - writeLock(); - try { - unprotectedSetXAttrs(src, xAttrs, flag); - } finally { - writeUnlock(); - } - } - - INode unprotectedSetXAttrs(final String src, final List xAttrs, - final EnumSet flag) - throws QuotaExceededException, IOException { - assert hasWriteLock(); - INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); - INode inode = resolveLastINode(src, iip); - int snapshotId = iip.getLatestSnapshotId(); - List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); - List newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag); - final boolean isFile = inode.isFile(); - - for (XAttr xattr : newXAttrs) { - final String xaName = XAttrHelper.getPrefixName(xattr); - - /* - * If we're adding the encryption zone xattr, then add src to the list - * of encryption zones. 
- */ - if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { - final HdfsProtos.ZoneEncryptionInfoProto ezProto = - HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue()); - ezManager.addEncryptionZone(inode.getId(), - PBHelper.convert(ezProto.getSuite()), - PBHelper.convert(ezProto.getCryptoProtocolVersion()), - ezProto.getKeyName()); - } - - if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) { - throw new IOException("Can only set '" + - SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file."); - } - } - - XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); - return inode; - } - - List setINodeXAttrs(final List existingXAttrs, - final List toSet, final EnumSet flag) - throws IOException { - // Check for duplicate XAttrs in toSet - // We need to use a custom comparator, so using a HashSet is not suitable - for (int i = 0; i < toSet.size(); i++) { - for (int j = i + 1; j < toSet.size(); j++) { - if (toSet.get(i).equalsIgnoreValue(toSet.get(j))) { - throw new IOException("Cannot specify the same XAttr to be set " + - "more than once"); - } - } - } - - // Count the current number of user-visible XAttrs for limit checking - int userVisibleXAttrsNum = 0; // Number of user visible xAttrs - - // The XAttr list is copied to an exactly-sized array when it's stored, - // so there's no need to size it precisely here. - int newSize = (existingXAttrs != null) ? existingXAttrs.size() : 0; - newSize += toSet.size(); - List xAttrs = Lists.newArrayListWithCapacity(newSize); - - // Check if the XAttr already exists to validate with the provided flag - for (XAttr xAttr: toSet) { - boolean exist = false; - if (existingXAttrs != null) { - for (XAttr a : existingXAttrs) { - if (a.equalsIgnoreValue(xAttr)) { - exist = true; - break; - } - } - } - XAttrSetFlag.validate(xAttr.getName(), exist, flag); - // add the new XAttr since it passed validation - xAttrs.add(xAttr); - if (isUserVisible(xAttr)) { - userVisibleXAttrsNum++; - } - } - - // Add the existing xattrs back in, if they weren't already set - if (existingXAttrs != null) { - for (XAttr existing : existingXAttrs) { - boolean alreadySet = false; - for (XAttr set : toSet) { - if (set.equalsIgnoreValue(existing)) { - alreadySet = true; - break; - } - } - if (!alreadySet) { - xAttrs.add(existing); - if (isUserVisible(existing)) { - userVisibleXAttrsNum++; - } - } - } - } - - if (userVisibleXAttrsNum > inodeXAttrsLimit) { - throw new IOException("Cannot add additional XAttr to inode, " - + "would exceed limit of " + inodeXAttrsLimit); - } - - return xAttrs; - } - - private boolean isUserVisible(XAttr xAttr) { - if (xAttr.getNameSpace() == XAttr.NameSpace.USER || - xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED) { - return true; - } - return false; - } - - List getXAttrs(String src) throws IOException { - String srcs = normalizePath(src); - readLock(); - try { - INodesInPath iip = getLastINodeInPath(srcs, true); - INode inode = resolveLastINode(src, iip); - int snapshotId = iip.getPathSnapshotId(); - return unprotectedGetXAttrs(inode, snapshotId); - } finally { - readUnlock(); - } - } - - List getXAttrs(INode inode, int snapshotId) throws IOException { - readLock(); - try { - return unprotectedGetXAttrs(inode, snapshotId); - } finally { - readUnlock(); - } - } - - private List unprotectedGetXAttrs(INode inode, int snapshotId) - throws IOException { - return XAttrStorage.readINodeXAttrs(inode, snapshotId); - } - - private XAttr unprotectedGetXAttrByName(INode inode, int snapshotId, - String xAttrName) - throws IOException { - 
List xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId); - if (xAttrs == null) { - return null; - } - for (XAttr x : xAttrs) { - if (XAttrHelper.getPrefixName(x) - .equals(xAttrName)) { - return x; - } - } - return null; - } - static INode resolveLastINode(String src, INodesInPath iip) throws FileNotFoundException { INode[] inodes = iip.getINodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index d63545b5ce729..d12ae1543b4de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -537,7 +537,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_SET_GENSTAMP_V1: { SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op)op; - fsNamesys.getBlockIdManager().setGenerationStampV1(setGenstampV1Op.genStampV1); + fsNamesys.getBlockIdManager().setGenerationStampV1( + setGenstampV1Op.genStampV1); break; } case OP_SET_PERMISSIONS: { @@ -730,12 +731,14 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_SET_GENSTAMP_V2: { SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op; - fsNamesys.getBlockIdManager().setGenerationStampV2(setGenstampV2Op.genStampV2); + fsNamesys.getBlockIdManager().setGenerationStampV2( + setGenstampV2Op.genStampV2); break; } case OP_ALLOCATE_BLOCK_ID: { AllocateBlockIdOp allocateBlockIdOp = (AllocateBlockIdOp) op; - fsNamesys.getBlockIdManager().setLastAllocatedBlockId(allocateBlockIdOp.blockId); + fsNamesys.getBlockIdManager().setLastAllocatedBlockId( + allocateBlockIdOp.blockId); break; } case OP_ROLLING_UPGRADE_START: { @@ -828,8 +831,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_SET_XATTR: { SetXAttrOp setXAttrOp = (SetXAttrOp) op; - fsDir.unprotectedSetXAttrs(setXAttrOp.src, setXAttrOp.xAttrs, - EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + FSDirXAttrOp.unprotectedSetXAttrs(fsDir, setXAttrOp.src, + setXAttrOp.xAttrs, + EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); if (toAddRetryCache) { fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId); } @@ -837,8 +842,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_REMOVE_XATTR: { RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op; - fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src, - removeXAttrOp.xAttrs); + FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, removeXAttrOp.src, + removeXAttrOp.xAttrs); if (toAddRetryCache) { fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId, removeXAttrOp.rpcCallId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e9ce78c06d9ec..2b530fa520cc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -532,9 +532,6 @@ private void logAuditEvent(boolean succeeded, private final RetryCache retryCache; - private final boolean xattrsEnabled; - private final int xattrMaxSize; - private KeyProviderCryptoExtension provider = null; private volatile boolean imageLoaded = 
false; @@ -849,19 +846,6 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { this.isDefaultAuditLogger = auditLoggers.size() == 1 && auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); - - this.xattrsEnabled = conf.getBoolean( - DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT); - LOG.info("XAttrs enabled? " + xattrsEnabled); - this.xattrMaxSize = conf.getInt( - DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, - DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); - Preconditions.checkArgument(xattrMaxSize >= 0, - "Cannot set a negative value for the maximum size of an xattr (%s).", - DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY); - final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : ""; - LOG.info("Maximum size of an xattr: " + xattrMaxSize + unlimited); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -5827,7 +5811,7 @@ private void checkUnreadableBySuperuser(FSPermissionChecker pc, INode inode, int snapshotId) throws IOException { if (pc.isSuperUser()) { - for (XAttr xattr : dir.getXAttrs(inode, snapshotId)) { + for (XAttr xattr : FSDirXAttrOp.getXAttrs(dir, inode, snapshotId)) { if (XAttrHelper.getPrefixName(xattr). equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { throw new AccessControlException("Access is denied for " + @@ -7967,136 +7951,35 @@ BatchedListEntries listEncryptionZones(long prevId) } } - /** - * Set xattr for a file or directory. - * - * @param src - * - path on which it sets the xattr - * @param xAttr - * - xAttr details to set - * @param flag - * - xAttrs flags - * @throws AccessControlException - * @throws SafeModeException - * @throws UnresolvedLinkException - * @throws IOException - */ void setXAttr(String src, XAttr xAttr, EnumSet flag, boolean logRetryCache) - throws AccessControlException, SafeModeException, - UnresolvedLinkException, IOException { - try { - setXAttrInt(src, xAttr, flag, logRetryCache); - } catch (AccessControlException e) { - logAuditEvent(false, "setXAttr", src); - throw e; - } - } - - private void setXAttrInt(final String srcArg, XAttr xAttr, - EnumSet flag, boolean logRetryCache) throws IOException { - String src = srcArg; - checkXAttrsConfigFlag(); - checkXAttrSize(xAttr); - HdfsFileStatus resultingStat = null; - FSPermissionChecker pc = getPermissionChecker(); - XAttrPermissionFilter.checkPermissionForApi(pc, xAttr, - FSDirectory.isReservedRawName(src)); + throws IOException { checkOperation(OperationCategory.WRITE); - byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + HdfsFileStatus auditStat = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set XAttr on " + src); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath4Write(src); - checkXAttrChangeAccess(iip, xAttr, pc); - List xAttrs = Lists.newArrayListWithCapacity(1); - xAttrs.add(xAttr); - dir.setXAttrs(src, xAttrs, flag); - getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); - resultingStat = getAuditFileInfo(src, false); + auditStat = FSDirXAttrOp.setXAttr(dir, src, xAttr, flag, logRetryCache); + } catch (AccessControlException e) { + logAuditEvent(false, "setXAttr", src); + throw e; } finally { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "setXAttr", srcArg, null, resultingStat); + logAuditEvent(true, "setXAttr", src, null, auditStat); } - 
/** - * Verifies that the combined size of the name and value of an xattr is within - * the configured limit. Setting a limit of zero disables this check. - */ - private void checkXAttrSize(XAttr xAttr) { - if (xattrMaxSize == 0) { - return; - } - int size = xAttr.getName().getBytes(Charsets.UTF_8).length; - if (xAttr.getValue() != null) { - size += xAttr.getValue().length; - } - if (size > xattrMaxSize) { - throw new HadoopIllegalArgumentException( - "The XAttr is too big. The maximum combined size of the" - + " name and value is " + xattrMaxSize - + ", but the total size is " + size); - } - } - - List getXAttrs(final String srcArg, List xAttrs) + List getXAttrs(final String src, List xAttrs) throws IOException { - String src = srcArg; - checkXAttrsConfigFlag(); - FSPermissionChecker pc = getPermissionChecker(); - final boolean isRawPath = FSDirectory.isReservedRawName(src); - boolean getAll = xAttrs == null || xAttrs.isEmpty(); - if (!getAll) { - try { - XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath); - } catch (AccessControlException e) { - logAuditEvent(false, "getXAttrs", srcArg); - throw e; - } - } checkOperation(OperationCategory.READ); - byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { checkOperation(OperationCategory.READ); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath(src, true); - if (isPermissionEnabled) { - dir.checkPathAccess(pc, iip, FsAction.READ); - } - List all = dir.getXAttrs(src); - List filteredAll = XAttrPermissionFilter. - filterXAttrsForApi(pc, all, isRawPath); - if (getAll) { - return filteredAll; - } else { - if (filteredAll == null || filteredAll.isEmpty()) { - return null; - } - List toGet = Lists.newArrayListWithCapacity(xAttrs.size()); - for (XAttr xAttr : xAttrs) { - boolean foundIt = false; - for (XAttr a : filteredAll) { - if (xAttr.getNameSpace() == a.getNameSpace() - && xAttr.getName().equals(a.getName())) { - toGet.add(a); - foundIt = true; - break; - } - } - if (!foundIt) { - throw new IOException( - "At least one of the attributes provided was not found."); - } - } - return toGet; - } + return FSDirXAttrOp.getXAttrs(dir, src, xAttrs); } catch (AccessControlException e) { - logAuditEvent(false, "getXAttrs", srcArg); + logAuditEvent(false, "getXAttrs", src); throw e; } finally { readUnlock(); @@ -8104,23 +7987,11 @@ List getXAttrs(final String srcArg, List xAttrs) } List listXAttrs(String src) throws IOException { - checkXAttrsConfigFlag(); - final FSPermissionChecker pc = getPermissionChecker(); - final boolean isRawPath = FSDirectory.isReservedRawName(src); checkOperation(OperationCategory.READ); - byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { checkOperation(OperationCategory.READ); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath(src, true); - if (isPermissionEnabled) { - /* To access xattr names, you need EXECUTE in the owning directory. */ - dir.checkParentAccess(pc, iip, FsAction.EXECUTE); - } - final List all = dir.getXAttrs(src); - return XAttrPermissionFilter. - filterXAttrsForApi(pc, all, isRawPath); + return FSDirXAttrOp.listXAttrs(dir, src); } catch (AccessControlException e) { logAuditEvent(false, "listXAttrs", src); throw e; @@ -8129,77 +8000,23 @@ List listXAttrs(String src) throws IOException { } } - /** - * Remove an xattr for a file or directory. 
- * - * @param src - * - path to remove the xattr from - * @param xAttr - * - xAttr to remove - * @throws AccessControlException - * @throws SafeModeException - * @throws UnresolvedLinkException - * @throws IOException - */ void removeXAttr(String src, XAttr xAttr, boolean logRetryCache) throws IOException { - try { - removeXAttrInt(src, xAttr, logRetryCache); - } catch (AccessControlException e) { - logAuditEvent(false, "removeXAttr", src); - throw e; - } - } - - void removeXAttrInt(final String srcArg, XAttr xAttr, boolean logRetryCache) - throws IOException { - String src = srcArg; - checkXAttrsConfigFlag(); - HdfsFileStatus resultingStat = null; - FSPermissionChecker pc = getPermissionChecker(); - XAttrPermissionFilter.checkPermissionForApi(pc, xAttr, - FSDirectory.isReservedRawName(src)); checkOperation(OperationCategory.WRITE); - byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + HdfsFileStatus auditStat = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove XAttr entry on " + src); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath4Write(src); - checkXAttrChangeAccess(iip, xAttr, pc); - - List xAttrs = Lists.newArrayListWithCapacity(1); - xAttrs.add(xAttr); - List removedXAttrs = dir.removeXAttrs(src, xAttrs); - if (removedXAttrs != null && !removedXAttrs.isEmpty()) { - getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache); - } else { - throw new IOException( - "No matching attributes found for remove operation"); - } - resultingStat = getAuditFileInfo(src, false); + auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache); + } catch (AccessControlException e) { + logAuditEvent(false, "removeXAttr", src); + throw e; } finally { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "removeXAttr", srcArg, null, resultingStat); - } - - private void checkXAttrChangeAccess(INodesInPath iip, XAttr xAttr, - FSPermissionChecker pc) throws AccessControlException { - if (isPermissionEnabled && xAttr.getNameSpace() == XAttr.NameSpace.USER) { - final INode inode = iip.getLastINode(); - if (inode != null && - inode.isDirectory() && - inode.getFsPermission().getStickyBit()) { - if (!pc.isSuperUser()) { - dir.checkOwner(pc, iip); - } - } else { - dir.checkPathAccess(pc, iip, FsAction.WRITE); - } - } + logAuditEvent(true, "removeXAttr", src, null, auditStat); } void checkAccess(String src, FsAction mode) throws IOException { @@ -8311,13 +8128,5 @@ private static void enableAsyncAuditLog() { } } - private void checkXAttrsConfigFlag() throws IOException { - if (!xattrsEnabled) { - throw new IOException(String.format( - "The XAttr operation has been rejected. 
" - + "Support for XAttrs has been disabled by setting %s to false.", - DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY)); - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index ad067cfa197fd..11905bd447978 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -201,8 +200,9 @@ public void testINodeXAttrsLimit() throws Exception { List newXAttrs = Lists.newArrayListWithCapacity(2); newXAttrs.add(newSystemXAttr); newXAttrs.add(newRawXAttr); - List xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs, - EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + List xAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, + newXAttrs, EnumSet.of( + XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); assertEquals(xAttrs.size(), 4); // Adding a trusted namespace xAttr, is affected by inode xAttrs limit. @@ -211,8 +211,9 @@ public void testINodeXAttrsLimit() throws Exception { setValue(new byte[]{0x34, 0x34, 0x34}).build(); newXAttrs.set(0, newXAttr1); try { - fsdir.setINodeXAttrs(existingXAttrs, newXAttrs, - EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, newXAttrs, + EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); fail("Setting user visible xattr on inode should fail if " + "reaching limit."); } catch (IOException e) { @@ -275,8 +276,9 @@ public void testXAttrMultiSetRemove() throws Exception { for (int i = 0; i < toAdd.size(); i++) { LOG.info("Will add XAttr " + toAdd.get(i)); } - List newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd, - EnumSet.of(XAttrSetFlag.CREATE)); + List newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, + toAdd, EnumSet.of( + XAttrSetFlag.CREATE)); verifyXAttrsPresent(newXAttrs, numExpectedXAttrs); existingXAttrs = newXAttrs; } @@ -296,8 +298,9 @@ public void testXAttrMultiSetRemove() throws Exception { final int expectedNumToRemove = toRemove.size(); LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs"); List removedXAttrs = Lists.newArrayList(); - List newXAttrs = fsdir.filterINodeXAttrs(existingXAttrs, - toRemove, removedXAttrs); + List newXAttrs = FSDirXAttrOp.filterINodeXAttrs(existingXAttrs, + toRemove, + removedXAttrs); assertEquals("Unexpected number of removed XAttrs", expectedNumToRemove, removedXAttrs.size()); verifyXAttrsPresent(newXAttrs, numExpectedXAttrs); @@ -316,8 +319,8 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { toAdd.add(generatedXAttrs.get(2)); toAdd.add(generatedXAttrs.get(0)); try { - fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag - .CREATE)); + FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, + EnumSet.of(XAttrSetFlag.CREATE)); fail("Specified the same xattr to be set twice"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Cannot specify the same " + @@ -328,15 +331,15 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { toAdd.remove(generatedXAttrs.get(0)); 
existingXAttrs.add(generatedXAttrs.get(0)); try { - fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag - .CREATE)); + FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, + EnumSet.of(XAttrSetFlag.CREATE)); fail("Set XAttr that is already set without REPLACE flag"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("already exists", e); } try { - fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag - .REPLACE)); + FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, + EnumSet.of(XAttrSetFlag.REPLACE)); fail("Set XAttr that does not exist without the CREATE flag"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("does not exist", e); @@ -344,8 +347,9 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { // Sanity test for CREATE toAdd.remove(generatedXAttrs.get(0)); - List newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd, - EnumSet.of(XAttrSetFlag.CREATE)); + List newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, + toAdd, EnumSet.of( + XAttrSetFlag.CREATE)); assertEquals("Unexpected toAdd size", 2, toAdd.size()); for (XAttr x : toAdd) { assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x)); @@ -362,8 +366,8 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { .build(); toAdd.add(xAttr); } - newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd, - EnumSet.of(XAttrSetFlag.REPLACE)); + newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, + EnumSet.of(XAttrSetFlag.REPLACE)); assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size()); for (int i=0; i<3; i++) { assertArrayEquals("Unexpected XAttr value", @@ -376,8 +380,9 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { for (int i=0; i<4; i++) { toAdd.add(generatedXAttrs.get(i)); } - newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd, - EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, + EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); verifyXAttrsPresent(newXAttrs, 4); } } From c782cd81598239417247698f49b48058ab378bcf Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Mon, 8 Dec 2014 13:44:44 -0800 Subject: [PATCH 008/432] HADOOP-11329. Add JAVA_LIBRARY_PATH to KMS startup options. Contributed by Arun Suresh. --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../hadoop-kms/src/main/conf/kms-env.sh | 6 ++++++ hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh | 11 ++++++++++- .../hadoop-kms/src/site/apt/index.apt.vm | 9 +++++++++ 4 files changed, 27 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d4962766bed52..d9219cc9ed323 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -519,6 +519,8 @@ Release 2.7.0 - UNRELEASED HADOOP-11354. ThrottledInputStream doesn't perform effective throttling. (Ted Yu via jing9) + HADOOP-11329. Add JAVA_LIBRARY_PATH to KMS startup options. 
(Arun Suresh via wang) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh index 88a2b8644ea0d..44dfe6afcdd9f 100644 --- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh +++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh @@ -47,3 +47,9 @@ # The password of the SSL keystore if using SSL # # export KMS_SSL_KEYSTORE_PASS=password + +# The full path to any native libraries that need to be loaded +# (For eg. location of natively compiled tomcat Apache portable +# runtime (APR) libraries +# +# export JAVA_LIBRARY_PATH=${HOME}/lib/native diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh index 24a1f54f964c2..f6ef6a5d0cc40 100644 --- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh +++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh @@ -31,7 +31,15 @@ BASEDIR=`cd ${BASEDIR}/..;pwd` KMS_SILENT=${KMS_SILENT:-true} -source ${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/kms-config.sh +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}" +source ${HADOOP_LIBEXEC_DIR}/kms-config.sh + + +if [ "x$JAVA_LIBRARY_PATH" = "x" ]; then + JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/" +else + JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/:${JAVA_LIBRARY_PATH}" +fi # The Java System property 'kms.http.port' it is not used by Kms, # it is used in Tomcat's server.xml configuration file @@ -50,6 +58,7 @@ catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}"; catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}"; catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}"; catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}"; +catalina_opts="${catalina_opts} -Djava.library.path=${JAVA_LIBRARY_PATH}"; print "Adding to CATALINA_OPTS: ${catalina_opts}" print "Found KMS_SSL_KEYSTORE_PASS: `echo ${KMS_SSL_KEYSTORE_PASS} | sed 's/./*/g'`" diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 11b84d39c786e..80d9a4858afa8 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -159,6 +159,15 @@ hadoop-${project.version} $ sbin/kms.sh start NOTE: You need to restart the KMS for the configuration changes to take effect. +** Loading native libraries + + The following environment variable (which can be set in KMS's + <<>> script) can be used to specify the location + of any required native libraries. For eg. Tomact native Apache Portable + Runtime (APR) libraries: + + * JAVA_LIBRARY_PATH + ** KMS Security Configuration *** Enabling Kerberos HTTP SPNEGO Authentication From 0e453c5295a9cf79aa3e3d5820d5e247bb45243c Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Mon, 8 Dec 2014 21:10:32 -0800 Subject: [PATCH 009/432] HADOOP-11287. Simplify UGI#reloginFromKeytab for Java 7+. Contributed by Li Lu. 
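The change below drops the JDK6-era reflective lookup of
javax.security.auth.kerberos.KeyTab and references the class directly, which is
always available on Java 7+. As a rough illustrative sketch only (not the Hadoop
implementation; the KeytabLoginCheck class name is invented for the example),
detecting a keytab-backed login from a JAAS Subject on Java 7+ can look like:

    import javax.security.auth.Subject;
    import javax.security.auth.kerberos.KerberosTicket;
    import javax.security.auth.kerberos.KeyTab;

    // Sketch: on Java 7+ the Krb5LoginModule (useKeyTab=true, storeKey=true)
    // stores a KeyTab object in the Subject's private credentials, so no
    // reflective class lookup is needed to recognize keytab logins.
    final class KeytabLoginCheck {
      static boolean isKeytabLogin(Subject subject) {
        return !subject.getPrivateCredentials(KeyTab.class).isEmpty();
      }
      static boolean isTicketCacheLogin(Subject subject) {
        return !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
      }
    }
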
--- .../hadoop-common/CHANGES.txt | 3 +++ .../hadoop/security/UserGroupInformation.java | 18 ++---------------- 2 files changed, 5 insertions(+), 16 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d9219cc9ed323..4b998d0dd79c6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -418,6 +418,9 @@ Release 2.7.0 - UNRELEASED HADOOP-11313. Adding a document about NativeLibraryChecker. (Tsuyoshi OZAWA via cnauroth) + HADOOP-11287. Simplify UGI#reloginFromKeytab for Java 7+. + (Li Lu via wheat9) + OPTIMIZATIONS HADOOP-11323. WritableComparator#compare keeps reference to byte array. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 0541f9d9cd00d..4b0b5f305fec1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -44,9 +44,9 @@ import javax.security.auth.Subject; import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.kerberos.KerberosKey; import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.kerberos.KerberosTicket; +import javax.security.auth.kerberos.KeyTab; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; import javax.security.auth.login.LoginContext; @@ -610,20 +610,6 @@ private void setLogin(LoginContext login) { user.setLogin(login); } - private static Class KEY_TAB_CLASS = KerberosKey.class; - static { - try { - // We use KEY_TAB_CLASS to determine if the UGI is logged in from - // keytab. In JDK6 and JDK7, if useKeyTab and storeKey are specified - // in the Krb5LoginModule, then some number of KerberosKey objects - // are added to the Subject's private credentials. However, in JDK8, - // a KeyTab object is added instead. More details in HADOOP-10786. - KEY_TAB_CLASS = Class.forName("javax.security.auth.kerberos.KeyTab"); - } catch (ClassNotFoundException cnfe) { - // Ignore. javax.security.auth.kerberos.KeyTab does not exist in JDK6. - } - } - /** * Create a UserGroupInformation for the given subject. * This does not change the subject or acquire new credentials. @@ -632,7 +618,7 @@ private void setLogin(LoginContext login) { UserGroupInformation(Subject subject) { this.subject = subject; this.user = subject.getPrincipals(User.class).iterator().next(); - this.isKeytab = !subject.getPrivateCredentials(KEY_TAB_CLASS).isEmpty(); + this.isKeytab = !subject.getPrivateCredentials(KeyTab.class).isEmpty(); this.isKrbTkt = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty(); } From a6df976936eca86bf6c119195f7f97a66d0e5206 Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Mon, 8 Dec 2014 22:18:32 -0800 Subject: [PATCH 010/432] YARN-2931. PublicLocalizer may fail until directory is initialized by LocalizeRunner. 
(Anubhav Dhoot via kasha) --- hadoop-yarn-project/CHANGES.txt | 3 + .../ResourceLocalizationService.java | 6 + .../TestResourceLocalizationService.java | 110 +++++++++++++++++- 3 files changed, 113 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 43b19ec515330..d06c8312c5ff0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -200,6 +200,9 @@ Release 2.7.0 - UNRELEASED YARN-2927. [YARN-1492] InMemorySCMStore properties are inconsistent. (Ray Chiang via kasha) + YARN-2931. PublicLocalizer may fail until directory is initialized by + LocalizeRunner. (Anubhav Dhoot via kasha) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index f4b6221a7bf4e..5440980590705 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -775,6 +775,12 @@ public void addResource(LocalizerResourceRequestEvent request) { if (!publicDirDestPath.getParent().equals(publicRootPath)) { DiskChecker.checkDir(new File(publicDirDestPath.toUri().getPath())); } + + // In case this is not a newly initialized nm state, ensure + // initialized local/log dirs similar to LocalizerRunner + getInitializedLocalDirs(); + getInitializedLogDirs(); + // explicitly synchronize pending here to avoid future task // completing and being dequeued before pending updated synchronized (pending) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index 1051e7acfc1fe..f968bb9f39eb3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java @@ -32,18 +32,16 @@ import static org.mockito.Matchers.isA; import static org.mockito.Matchers.isNull; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; 
import static org.mockito.Mockito.when; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; @@ -63,10 +61,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Future; -import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.security.AccessControlException; import org.junit.Assert; import org.apache.commons.io.FileUtils; @@ -940,6 +935,109 @@ public boolean matches(Object o) { } } + @Test + @SuppressWarnings("unchecked") + public void testPublicResourceInitializesLocalDir() throws Exception { + + // Setup state to simulate restart NM with existing state meaning no + // directory creation during initialization + NMStateStoreService spyStateStore = spy(nmContext.getNMStateStore()); + when(spyStateStore.canRecover()).thenReturn(true); + NMContext spyContext = spy(nmContext); + when(spyContext.getNMStateStore()).thenReturn(spyStateStore); + + List localDirs = new ArrayList(); + String[] sDirs = new String[4]; + for (int i = 0; i < 4; ++i) { + localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); + sDirs[i] = localDirs.get(i).toString(); + } + conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); + + + DrainDispatcher dispatcher = new DrainDispatcher(); + EventHandler applicationBus = mock(EventHandler.class); + dispatcher.register(ApplicationEventType.class, applicationBus); + EventHandler containerBus = mock(EventHandler.class); + dispatcher.register(ContainerEventType.class, containerBus); + + ContainerExecutor exec = mock(ContainerExecutor.class); + DeletionService delService = mock(DeletionService.class); + LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); + dirsHandler.init(conf); + + dispatcher.init(conf); + dispatcher.start(); + + try { + ResourceLocalizationService rawService = + new ResourceLocalizationService(dispatcher, exec, delService, + dirsHandler, spyContext); + ResourceLocalizationService spyService = spy(rawService); + doReturn(mockServer).when(spyService).createServer(); + doReturn(lfs).when(spyService).getLocalFileContext( + isA(Configuration.class)); + + spyService.init(conf); + spyService.start(); + + final FsPermission defaultPerm = new FsPermission((short)0755); + + // verify directory is not created at initialization + for (Path p : localDirs) { + p = new Path((new URI(p.toString())).getPath()); + Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); + verify(spylfs, never()) + .mkdir(eq(publicCache),eq(defaultPerm), eq(true)); + } + + final String user = "user0"; + // init application + final Application app = mock(Application.class); + final ApplicationId appId = + BuilderUtils.newApplicationId(314159265358979L, 3); + when(app.getUser()).thenReturn(user); + when(app.getAppId()).thenReturn(appId); + spyService.handle(new ApplicationLocalizationEvent( + LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); + dispatcher.await(); + + // init container. 
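+    // (The public resource request queued below must lazily create the
+    // local filecache directories itself, because the recovered-NM setup
+    // above skipped directory initialization during service start.)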
+ final Container c = getMockContainer(appId, 42, user); + + // init resources + Random r = new Random(); + long seed = r.nextLong(); + System.out.println("SEED: " + seed); + r.setSeed(seed); + + // Queue up public resource localization + final LocalResource pubResource = getPublicMockedResource(r); + final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource); + + Map> req = + new HashMap>(); + req.put(LocalResourceVisibility.PUBLIC, + Collections.singletonList(pubReq)); + + Set pubRsrcs = new HashSet(); + pubRsrcs.add(pubReq); + + spyService.handle(new ContainerLocalizationRequestEvent(c, req)); + dispatcher.await(); + + // verify directory creation + for (Path p : localDirs) { + p = new Path((new URI(p.toString())).getPath()); + Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); + verify(spylfs).mkdir(eq(publicCache),eq(defaultPerm), eq(true)); + } + } finally { + dispatcher.stop(); + } + } + @Test(timeout=20000) @SuppressWarnings("unchecked") // mocked generics public void testFailedPublicResource() throws Exception { From b067d33e9e47417340b7c907c4951c033f87cf08 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 9 Dec 2014 11:04:59 +0000 Subject: [PATCH 011/432] HADOOP-11363 Hadoop maven surefire-plugin uses must set heap size. --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-project/pom.xml | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4b998d0dd79c6..c5aa954daa413 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -524,6 +524,8 @@ Release 2.7.0 - UNRELEASED HADOOP-11329. Add JAVA_LIBRARY_PATH to KMS startup options. (Arun Suresh via wang) + HADOOP-11363 Hadoop maven surefire-plugin uses must set heap size. (stevel) + Release 2.6.0 - 2014-11-18 INCOMPATIBLE CHANGES diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 3b52dc3bb190e..7c492c8a0083b 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -84,6 +84,12 @@ --> [${javac.version},) [3.0.2,) + + + -Xmx4096m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError + 2.17 + ${maven-surefire-plugin.version} + ${maven-surefire-plugin.version} @@ -950,7 +956,7 @@ org.apache.maven.plugins maven-surefire-plugin - 2.16 + ${maven-surefire-plugin.version} org.apache.maven.plugins @@ -1061,7 +1067,7 @@ false 900 - -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError + ${maven-surefire-plugin.argLine} ${hadoop.common.build.dir} From ffcfce25def14b0a116ecf7f01910ae72fa0fe19 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 9 Dec 2014 11:15:19 +0000 Subject: [PATCH 012/432] HADOOP-10134 [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments. 
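JDK8's javadoc tool runs doclint by default, which rejects constructs that
earlier JDKs tolerated: self-closing <p/> tags, bare < and > characters, and
malformed lists all become errors. The diff below rewrites the affected
comments in hadoop-auth accordingly. As a generic illustration of the kind of
cleanup involved (DoclintCleanupExample is an invented class, not part of the
patch):

    /**
     * Doclint-friendly javadoc: paragraph breaks use an opening {@code <p>}
     * tag rather than the self-closing {@code <p/>} form, and literal special
     * characters are escaped, for example "no effect if the value is &lt; 0".
     */
    public final class DoclintCleanupExample {
      private DoclintCleanupExample() {
        // no instances; the class only carries the sample comment
      }
    }
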
--- .../client/AuthenticatedURL.java | 13 +++--- .../authentication/client/Authenticator.java | 2 +- .../client/KerberosAuthenticator.java | 10 ++-- .../client/PseudoAuthenticator.java | 10 ++-- .../AltKerberosAuthenticationHandler.java | 5 +- .../server/AuthenticationFilter.java | 46 ++++++++++--------- .../server/AuthenticationHandler.java | 21 ++++----- .../server/AuthenticationToken.java | 4 +- .../server/KerberosAuthenticationHandler.java | 11 ++--- .../server/PseudoAuthenticationHandler.java | 17 ++++--- .../authentication/util/KerberosName.java | 4 +- .../authentication/util/KerberosUtil.java | 8 ++-- .../security/authentication/util/Signer.java | 2 - .../util/ZKSignerSecretProvider.java | 8 ++-- .../hadoop-common/CHANGES.txt | 3 ++ .../org/apache/hadoop/minikdc/MiniKdc.java | 10 ++-- .../apache/hadoop/maven/plugin/util/Exec.java | 4 +- .../maven/plugin/util/FileSetUtils.java | 2 +- 18 files changed, 86 insertions(+), 94 deletions(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java index 61c3c6d5f53c3..c50a5164a5780 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java @@ -24,19 +24,18 @@ /** * The {@link AuthenticatedURL} class enables the use of the JDK {@link URL} class * against HTTP endpoints protected with the {@link AuthenticationFilter}. - *

+ *

* The authentication mechanisms supported by default are Hadoop Simple authentication * (also known as pseudo authentication) and Kerberos SPNEGO authentication. - *

+ *

* Additional authentication mechanisms can be supported via {@link Authenticator} implementations. - *

+ *

* The default {@link Authenticator} is the {@link KerberosAuthenticator} class which supports * automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication. - *

+ *

* AuthenticatedURL instances are not thread-safe. - *

+ *

* The usage pattern of the {@link AuthenticatedURL} is: - *

*

  *
  * // establishing an initial connection
@@ -240,7 +239,7 @@ public static void injectToken(HttpURLConnection conn, Token token) {
 
   /**
    * Helper method that extracts an authentication token received from a connection.
-   * 

+ *

* This method is used by {@link Authenticator} implementations. * * @param conn connection to extract the authentication token from. diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java index e7bae4a891593..6828970fdbb59 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java @@ -19,7 +19,7 @@ /** * Interface for client authentication mechanisms. - *

+ *

* Implementations are use-once instances, they don't need to be thread safe. */ public interface Authenticator { diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java index 928866c532cce..323b019eb827b 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java @@ -43,9 +43,9 @@ /** * The {@link KerberosAuthenticator} implements the Kerberos SPNEGO authentication sequence. - *

+ *

* It uses the default principal for the Kerberos cache (normally set via kinit). - *

+ *

* It falls back to the {@link PseudoAuthenticator} if the HTTP endpoint does not trigger an SPNEGO authentication * sequence. */ @@ -162,9 +162,9 @@ public void setConnectionConfigurator(ConnectionConfigurator configurator) { /** * Performs SPNEGO authentication against the specified URL. - *

+ *

* If a token is given it does a NOP and returns the given token. - *

+ *

* If no token is given, it will perform the SPNEGO authentication sequence using an * HTTP OPTIONS request. * @@ -211,7 +211,7 @@ public void authenticate(URL url, AuthenticatedURL.Token token) /** * If the specified URL does not support SPNEGO authentication, a fallback {@link Authenticator} will be used. - *

+ *

* This implementation returns a {@link PseudoAuthenticator}. * * @return the fallback {@link Authenticator}. diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java index f534be9b20bc0..46d94b88dec67 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java @@ -20,7 +20,7 @@ /** * The {@link PseudoAuthenticator} implementation provides an authentication equivalent to Hadoop's * Simple authentication, it trusts the value of the 'user.name' Java System property. - *

+ *

* The 'user.name' value is propagated using an additional query string parameter {@link #USER_NAME} ('user.name'). */ public class PseudoAuthenticator implements Authenticator { @@ -47,13 +47,13 @@ public void setConnectionConfigurator(ConnectionConfigurator configurator) { /** * Performs simple authentication against the specified URL. - *

+ *

* If a token is given it does a NOP and returns the given token. - *

+ *

* If no token is given, it will perform an HTTP OPTIONS request injecting an additional * parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()} * method. - *

+ *

* If the response is successful it will update the authentication token. * * @param url the URl to authenticate against. @@ -79,7 +79,7 @@ public void authenticate(URL url, AuthenticatedURL.Token token) throws IOExcepti /** * Returns the current user name. - *

+ *

* This implementation returns the value of the Java system property 'user.name' * * @return the current user name. diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java index e786e37df8ed7..987330fa0e5be 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java @@ -28,7 +28,6 @@ * to allow a developer to implement their own custom authentication for browser * access. The alternateAuthenticate method will be called whenever a request * comes from a browser. - *

*/ public abstract class AltKerberosAuthenticationHandler extends KerberosAuthenticationHandler { @@ -52,7 +51,6 @@ public abstract class AltKerberosAuthenticationHandler /** * Returns the authentication type of the authentication handler, * 'alt-kerberos'. - *

* * @return the authentication type of the authentication handler, * 'alt-kerberos'. @@ -80,7 +78,6 @@ public void init(Properties config) throws ServletException { * completed successfully (in the case of Java access) and only after the * custom authentication implemented by the subclass in alternateAuthenticate * has completed successfully (in the case of browser access). - *

* * @param request the HTTP client request. * @param response the HTTP client response. @@ -109,7 +106,7 @@ public AuthenticationToken authenticate(HttpServletRequest request, * refers to a browser. If its not a browser, then Kerberos authentication * will be used; if it is a browser, alternateAuthenticate from the subclass * will be used. - *

+ *

* A User-Agent String is considered to be a browser if it does not contain * any of the values from alt-kerberos.non-browser.user-agents; the default * behavior is to consider everything a browser unless it contains one of: diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 0ac352ba2d116..e891ed2623dd5 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -44,18 +44,20 @@ import java.util.*; /** - * The {@link AuthenticationFilter} enables protecting web application resources with different (pluggable) + *

The {@link AuthenticationFilter} enables protecting web application + * resources with different (pluggable) * authentication mechanisms and signer secret providers. - *

+ *

+ *

* Out of the box it provides 2 authentication mechanisms: Pseudo and Kerberos SPNEGO. - *

+ *

* Additional authentication mechanisms are supported via the {@link AuthenticationHandler} interface. - *

+ *

* This filter delegates to the configured authentication handler for authentication and once it obtains an * {@link AuthenticationToken} from it, sets a signed HTTP cookie with the token. For client requests * that provide the signed HTTP cookie, it verifies the validity of the cookie, extracts the user information * and lets the request proceed to the target resource. - *

+ *

* The supported configuration properties are: *
    *
  • config.prefix: indicates the prefix to be used by all other configuration properties, the default value @@ -73,18 +75,19 @@ *
  • [#PREFIX#.]cookie.domain: domain to use for the HTTP cookie that stores the authentication token.
  • *
  • [#PREFIX#.]cookie.path: path to use for the HTTP cookie that stores the authentication token.
  • *
- *

+ *

 * The rest of the configuration properties are specific to the {@link AuthenticationHandler} implementation and the
 * {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, it will remove
 * the prefix from it and it will pass them to the the authentication handler for initialization. Properties that do
 * not start with the prefix will not be passed to the authentication handler initialization.
- *
+ *
+ *
 * Out of the box it provides 3 signer secret provider implementations:
 * "string", "random", and "zookeeper"
- *
+ *
 * Additional signer secret providers are supported via the
 * {@link SignerSecretProvider} class.
- *
+ *
 * For the HTTP cookies mentioned above, the SignerSecretProvider is used to
 * determine the secret to use for signing the cookies. Different
 * implementations can have different behaviors. The "string" implementation
@@ -94,7 +97,7 @@
 * [#PREFIX#.]token.validity mentioned above. The "zookeeper" implementation
 * is like the "random" one, except that it synchronizes the random secret
 * and rollovers between multiple servers; it's meant for HA services.
- *
+ *
 * The relevant configuration properties are:
 *
 *
   • signer.secret.provider: indicates the name of the SignerSecretProvider
@@ -108,10 +111,10 @@
 * implementations are specified, this value is used as the rollover
 * interval.
   • *
- *
+ *
 * The "zookeeper" implementation has additional configuration properties that
 * must be specified; see {@link ZKSignerSecretProvider} for details.
- *
+ *
 * For subclasses of AuthenticationFilter that want additional control over the
 * SignerSecretProvider, they can use the following attribute set in the
 * ServletContext:
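For orientation (illustrative, not from the patch itself), the signer-related settings named in the javadoc above could be assembled like this. The keys come from the documented property names (with the optional #PREFIX# omitted); the values are arbitrary examples, not defaults.

    import java.util.Properties;

    public class SignerConfigSketch {
      // Builds the signer-related properties named in the AuthenticationFilter
      // javadoc. Values here are examples only.
      static Properties signerProperties() {
        Properties props = new Properties();
        props.setProperty("signer.secret.provider", "random"); // "string" | "random" | "zookeeper"
        props.setProperty("token.validity", "36000");  // seconds; also used as the rollover interval
        props.setProperty("cookie.domain", ".example.com");
        props.setProperty("cookie.path", "/");
        return props;
      }
    }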
@@ -190,10 +193,9 @@ public class AuthenticationFilter implements Filter {
   private String cookiePath;
   /**
-   * Initializes the authentication filter and signer secret provider.
-   *
-   * It instantiates and initializes the specified {@link AuthenticationHandler}.
-   *
+   * Initializes the authentication filter and signer secret provider.
+   * It instantiates and initializes the specified {@link
+   * AuthenticationHandler}.
    *
    * @param filterConfig filter configuration.
    *
@@ -375,7 +377,7 @@ protected String getCookiePath() {
   /**
    * Destroys the filter.
-   *
+   *
    * It invokes the {@link AuthenticationHandler#destroy()} method to release any resources it may hold.
    */
   @Override
@@ -393,7 +395,7 @@ public void destroy() {
    * Returns the filtered configuration (only properties starting with the specified prefix). The property keys
    * are also trimmed from the prefix. The returned {@link Properties} object is used to initialized the
    * {@link AuthenticationHandler}.
-   *
+   *
    * This method can be overriden by subclasses to obtain the configuration from other configuration source than
    * the web.xml file.
    *
@@ -419,7 +421,7 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterCo
   /**
    * Returns the full URL of the request including the query string.
-   *
+   *
    * Used as a convenience method for logging purposes.
    *
    * @param request the request object.
    *
@@ -436,11 +438,11 @@ protected String getRequestURL(HttpServletRequest request) {
   /**
    * Returns the {@link AuthenticationToken} for the request.
-   *
+   *
    * It looks at the received HTTP cookies and extracts the value of the {@link AuthenticatedURL#AUTH_COOKIE}
    * if present. It verifies the signature and if correct it creates the {@link AuthenticationToken} and returns
    * it.
-   *
+   *
    * If this method returns null the filter will invoke the configured {@link AuthenticationHandler}
    * to perform user authentication.
    *
@@ -597,7 +599,7 @@ protected void doFilter(FilterChain filterChain, HttpServletRequest request,
    *
    * @param token authentication token for the cookie.
    * @param expires UNIX timestamp that indicates the expire date of the
-   *                cookie. It has no effect if its value < 0.
+   *                cookie. It has no effect if its value < 0.
    *
    * XXX the following code duplicate some logic in Jetty / Servlet API,
    * because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6
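A sketch of the getConfiguration override described in the javadoc above, for a subclass that supplies its configuration from code rather than web.xml. This is illustrative only: the signature (including throws ServletException) is assumed from the truncated hunk header, and the "type" property name is an assumption since the full property list is not shown in this excerpt.

    import java.util.Properties;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

    // Hypothetical subclass that hard-codes its configuration instead of
    // reading filter init-params from web.xml.
    public class HardCodedAuthFilter extends AuthenticationFilter {

      @Override
      protected Properties getConfiguration(String configPrefix,
          FilterConfig filterConfig) throws ServletException {
        Properties props = new Properties();
        props.setProperty("type", "kerberos");        // assumed key for choosing the handler
        props.setProperty("token.validity", "36000"); // seconds
        props.setProperty("cookie.path", "/");
        return props;
      }
    }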
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
index 04984be5a7d58..797e95a689dd5 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
@@ -24,9 +24,7 @@
 /**
  * Interface for server authentication mechanisms.
- *
  * The {@link AuthenticationFilter} manages the lifecycle of the authentication handler.
- *
 * Implementations must be thread-safe as one instance is initialized and used for all requests.
 */
 public interface AuthenticationHandler {
@@ -35,7 +33,6 @@ public interface AuthenticationHandler {
   /**
    * Returns the authentication type of the authentication handler.
-   *
    * This should be a name that uniquely identifies the authentication type.
    * For example 'simple' or 'kerberos'.
    *
@@ -45,7 +42,7 @@ public interface AuthenticationHandler {
   /**
    * Initializes the authentication handler instance.
-   *
+   *
    * This method is invoked by the {@link AuthenticationFilter#init} method.
    *
    * @param config configuration properties to initialize the handler.
    *
@@ -56,21 +53,21 @@ public interface AuthenticationHandler {
   /**
    * Destroys the authentication handler instance.
-   *
+   *
    * This method is invoked by the {@link AuthenticationFilter#destroy} method.
    */
   public void destroy();
   /**
    * Performs an authentication management operation.
-   *
+   *
    * This is useful for handling operations like get/renew/cancel
    * delegation tokens which are being handled as operations of the
    * service end-point.
-   *
+   *
    * If the method returns TRUE the request will continue normal
    * processing, this means the method has not produced any HTTP response.
-   *
+   *
    * If the method returns FALSE the request will end, this means
    * the method has produced the corresponding HTTP response.
    *
@@ -91,17 +88,17 @@ public boolean managementOperation(AuthenticationToken token,
   /**
    * Performs an authentication step for the given HTTP client request.
-   *
+   *
    * This method is invoked by the {@link AuthenticationFilter} only if the HTTP client request is
    * not yet authenticated.
-   *
+   *
    * Depending upon the authentication mechanism being implemented, a particular HTTP client may
    * end up making a sequence of invocations before authentication is successfully established (this is
    * the case of Kerberos SPNEGO).
-   *
+   *
    * This method must return an {@link AuthenticationToken} only if the the HTTP client request has
    * been successfully and fully authenticated.
-   *
+   *
    * If the HTTP client request has not been completely authenticated, this method must take over
    * the corresponding HTTP response and it must return null.
    *
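Putting the interface contract above together, a deliberately naive handler could look like the following sketch (illustrative only). The method names come from the hunks above; the throws clauses on managementOperation and authenticate are assumptions, and the query-parameter "credential" is purely made up.

    import java.io.IOException;
    import java.util.Properties;
    import javax.servlet.ServletException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;

    // Hypothetical handler that trusts a query parameter; a real handler would
    // perform an actual challenge/response exchange (e.g. SPNEGO).
    public class QueryStringAuthenticationHandler implements AuthenticationHandler {

      @Override
      public String getType() {
        return "query-string";          // unique name, like "simple" or "kerberos"
      }

      @Override
      public void init(Properties config) throws ServletException {
        // Invoked from AuthenticationFilter#init; nothing to configure here.
      }

      @Override
      public void destroy() {
        // Invoked from AuthenticationFilter#destroy; no resources held.
      }

      @Override
      public boolean managementOperation(AuthenticationToken token,
          HttpServletRequest request, HttpServletResponse response)
          throws IOException, AuthenticationException {
        return true;                    // no management ops; continue normal processing
      }

      @Override
      public AuthenticationToken authenticate(HttpServletRequest request,
          HttpServletResponse response) throws IOException, AuthenticationException {
        String user = request.getParameter("user");   // hypothetical credential
        if (user == null) {
          // Not fully authenticated: take over the response and return null.
          response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
          return null;
        }
        return new AuthenticationToken(user, user, getType());
      }
    }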
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
index ff68847c8a0bc..bb3e71da61c32 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
@@ -29,7 +29,7 @@
 * The {@link AuthenticationToken} contains information about an authenticated
 * HTTP client and doubles as the {@link Principal} to be returned by
 * authenticated {@link HttpServletRequest}s
- *
+ *
 * The token can be serialized/deserialized to and from a string as it is sent
 * and received in HTTP client responses and requests as a HTTP cookie (this is
 * done by the {@link AuthenticationFilter}).
@@ -170,7 +170,7 @@ public boolean isExpired() {
   /**
    * Returns the string representation of the token.
-   *
+   *
    * This string representation is parseable by the {@link #parse} method.
    *
    * @return the string representation of the token.
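A quick round trip through the string form described above (illustrative only): the (userName, principal, type) constructor and the setExpires/parse/isExpired calls are assumptions about this class's API, used here just to show the serialize-then-parse flow handled by the filter's cookie.

    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;

    public class TokenRoundTrip {
      public static void main(String[] args) throws AuthenticationException {
        AuthenticationToken token =
            new AuthenticationToken("guest", "guest@EXAMPLE.COM", "kerberos");
        token.setExpires(System.currentTimeMillis() + 3600 * 1000);

        String cookieValue = token.toString();   // what the filter signs into the cookie
        AuthenticationToken parsed = AuthenticationToken.parse(cookieValue);
        System.out.println(parsed.getUserName() + " expired=" + parsed.isExpired());
      }
    }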
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 92bc57c413b45..846541b162b5f 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -51,7 +51,7 @@
 /**
 * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
- *
+ *
 * The supported configuration properties are:
 *