From 7ac4d11db2d935be435f9cd6dcc996acc38a4b40 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Wed, 17 Mar 2021 14:33:00 -0700 Subject: [PATCH 01/16] HDFS-15850. Superuser actions should be reported to external enforcers --- .../router/RouterPermissionChecker.java | 1 - .../hdfs/server/namenode/FSDirAttrOp.java | 46 +++++++---- .../namenode/FSDirStatAndListingOp.java | 1 + .../hdfs/server/namenode/FSDirectory.java | 11 ++- .../hdfs/server/namenode/FSNamesystem.java | 76 +++++++++---------- .../server/namenode/FSPermissionChecker.java | 38 ++++++++-- .../namenode/INodeAttributeProvider.java | 28 ++++++- .../server/namenode/NameNodeRpcServer.java | 8 +- 8 files changed, 136 insertions(+), 73 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java index 23a3c6e759e7f..ac796ff823420 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java @@ -103,7 +103,6 @@ public void checkPermission(MountTable mountTable, FsAction access) * based on Datanode#checkSuperuserPrivilege(). * @throws AccessControlException If the user is not authorized. */ - @Override public void checkSuperuserPrivilege() throws AccessControlException { // Try to get the ugi in the RPC call. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 173348f356473..b689305530fcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -82,15 +82,24 @@ static FileStatus setOwner( fsd.writeLock(); try { iip = fsd.resolvePath(pc, src, DirOp.WRITE); + // Only the owner or super user can change the group or owner fsd.checkOwner(pc, iip); - if (!pc.isSuperUser()) { - if (username != null && !pc.getUser().equals(username)) { - throw new AccessControlException("User " + pc.getUser() - + " is not a super user (non-super user cannot change owner)."); - } - if (group != null && !pc.isMemberOfGroup(group)) { - throw new AccessControlException( - "User " + pc.getUser() + " does not belong to " + group); + // Only a super user can change ownership to a different user + // or group to a different group that the user doesn't belong to + if ((username != null && !pc.getUser().equals(username)) || + (group != null && !pc.isMemberOfGroup(group))) { + try { + // check if the user is superuser + pc.checkSuperuserPrivilege(iip.getPath()); + } catch (AccessControlException e) { + if (username != null && !pc.getUser().equals(username)) { + throw new AccessControlException("User " + pc.getUser() + + " is not a super user (non-super user cannot change owner)."); + } + if (group != null && !pc.isMemberOfGroup(group)) { + throw new AccessControlException( + "User " + pc.getUser() + " does not belong to " + group); + } } } changed = unprotectedSetOwner(fsd, iip, username, group); @@ -238,13 +247,20 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src, 
fsd.writeLock(); try { INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE); - if (fsd.isPermissionEnabled() && !pc.isSuperUser() && allowOwner) { - INodeDirectory parentDir= iip.getLastINode().getParent(); - if (parentDir == null || - !parentDir.getUserName().equals(pc.getUser())) { - throw new AccessControlException( - "Access denied for user " + pc.getUser() + - ". Superuser or owner of parent folder privilege is required"); + if (fsd.isPermissionEnabled()) { + if (allowOwner && !pc.isSuperUser()) { + try { + fsd.checkOwner(pc, iip.getParentINodesInPath()); + } catch(AccessControlException ace) { + throw new AccessControlException( + "Access denied for user " + pc.getUser() + + ". Superuser or owner of parent folder privilege" + + " is required"); + } + } else { + // At this point, it must be a super user. + // Call the external enforcer for audit. + pc.checkSuperuserPrivilege(iip.getPath()); } } INodeDirectory changed = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index dfacc491eae53..8529a3811e57d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -105,6 +105,7 @@ static HdfsFileStatus getFileInfo(FSDirectory fsd, FSPermissionChecker pc, // superuser to receive null instead. try { iip = fsd.resolvePath(pc, srcArg, dirOp); + pc.checkSuperuserPrivilege(iip.getPath()); } catch (AccessControlException ace) { return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 0e15921ba4953..9621a57de8fec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -717,18 +717,18 @@ public INodesInPath resolvePath(FSPermissionChecker pc, String src, byte[][] components = INode.getPathComponents(src); boolean isRaw = isReservedRawName(components); + components = resolveComponents(components, this); + INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw); if (isPermissionEnabled && pc != null && isRaw) { switch(dirOp) { case READ_LINK: case READ: break; default: - pc.checkSuperuserPrivilege(); + pc.checkSuperuserPrivilege(iip.getPath()); break; } } - components = resolveComponents(components, this); - INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw); // verify all ancestors are dirs and traversable. 
note that only // methods that create new namespace items have the signature to throw // PNDE @@ -1942,7 +1942,10 @@ void checkPermission(FSPermissionChecker pc, INodesInPath iip, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { - if (!pc.isSuperUser()) { + if (pc.isSuperUser()) { + // call the enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } else { readLock(); try { pc.checkPermission(iip, doCheckOwner, ancestorAccess, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7d9f78c0647fb..15eda598c50bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1907,7 +1907,7 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long */ void metaSave(String filename) throws IOException { String operationName = "metaSave"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.READ); readLock(); try { @@ -1954,7 +1954,7 @@ BatchedListEntries listOpenFiles(long prevId, EnumSet openFilesTypes, String path) throws IOException { INode.checkAbsolutePath(path); final String operationName = "listOpenFiles"; - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, path); checkOperation(OperationCategory.READ); BatchedListEntries batchedListEntries; String normalizedPath = new Path(path).toString(); // normalize path. @@ -2415,7 +2415,7 @@ private void checkStoragePolicyEnabled(final String operationNameReadable, } if (checkSuperUser && isStoragePolicySuperuserOnly) { checkSuperuserPrivilege( - CaseUtils.toCamelCase(operationNameReadable, false)); + CaseUtils.toCamelCase(operationNameReadable, false), null); } } @@ -3582,7 +3582,7 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) FSPermissionChecker.setOperationType(operationName); try { if(!allowOwnerSetQuota) { - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, src); } writeLock(); try { @@ -4860,7 +4860,7 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type) throws IOException { String operationName = "datanodeReport"; DatanodeInfo[] arr; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.UNCHECKED); readLock(); try { @@ -4884,7 +4884,7 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type ) throws IOException { String operationName = "getDatanodeStorageReport"; DatanodeStorageReport[] reports; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.UNCHECKED); readLock(); try { @@ -4907,7 +4907,7 @@ boolean saveNamespace(final long timeWindow, final long txGap) throws IOException { String operationName = "saveNamespace"; checkOperation(OperationCategory.UNCHECKED); - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); boolean saved = false; cpLock(); // Block if a checkpointing is in progress on standby. 
@@ -4940,7 +4940,7 @@ boolean saveNamespace(final long timeWindow, final long txGap) boolean restoreFailedStorage(String arg) throws IOException { String operationName = getFailedStorageCommand(arg); boolean val = false; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. writeLock(); @@ -4968,7 +4968,7 @@ Date getStartTime() { void finalizeUpgrade() throws IOException { String operationName = "finalizeUpgrade"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. writeLock(); @@ -4985,7 +4985,7 @@ void finalizeUpgrade() throws IOException { void refreshNodes() throws IOException { String operationName = "refreshNodes"; checkOperation(OperationCategory.UNCHECKED); - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration()); logAuditEvent(true, operationName, null); } @@ -4993,7 +4993,7 @@ void refreshNodes() throws IOException { void setBalancerBandwidth(long bandwidth) throws IOException { String operationName = "setBalancerBandwidth"; checkOperation(OperationCategory.WRITE); - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); logAuditEvent(true, operationName, null); } @@ -5002,7 +5002,7 @@ boolean setSafeMode(SafeModeAction action) throws IOException { String operationName = action.toString().toLowerCase(); boolean error = false; if (action != SafeModeAction.SAFEMODE_GET) { - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); switch(action) { case SAFEMODE_LEAVE: // leave safe mode leaveSafeMode(false); @@ -5158,7 +5158,7 @@ private synchronized void setManualAndResourceLowSafeMode(boolean manual, CheckpointSignature rollEditLog() throws IOException { String operationName = "rollEditLog"; CheckpointSignature result = null; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.JOURNAL); writeLock(); try { @@ -5225,14 +5225,14 @@ PermissionStatus createFsOwnerPermissions(FsPermission permission) { void checkSuperuserPrivilege() throws AccessControlException { if (isPermissionEnabled) { FSPermissionChecker pc = getPermissionChecker(); - pc.checkSuperuserPrivilege(); + pc.checkSuperuserPrivilege(null); } } - void checkSuperuserPrivilege(FSPermissionChecker pc) - throws AccessControlException { + void checkSuperuserPrivilege(String path) throws AccessControlException { if (isPermissionEnabled) { - pc.checkSuperuserPrivilege(); + FSPermissionChecker pc = getPermissionChecker(); + pc.checkSuperuserPrivilege(path); } } @@ -6011,7 +6011,8 @@ public String toString() { */ Collection listCorruptFileBlocks(String path, String[] cookieTab) throws IOException { - checkSuperuserPrivilege(); + final String operationName = "listCorruptFileBlocks"; + checkSuperuserPrivilege(operationName, path); checkOperation(OperationCategory.READ); int count = 0; @@ -6939,7 +6940,7 @@ public SnapshotManager getSnapshotManager() { void allowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); final String operationName = "allowSnapshot"; - checkSuperuserPrivilege(operationName); + 
checkSuperuserPrivilege(operationName, path); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6956,7 +6957,7 @@ void allowSnapshot(String path) throws IOException { void disallowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); final String operationName = "disallowSnapshot"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, path); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7301,7 +7302,7 @@ void removeSnapshottableDirs(List toRemove) { RollingUpgradeInfo queryRollingUpgrade() throws IOException { final String operationName = "queryRollingUpgrade"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.READ); readLock(); try { @@ -7321,7 +7322,7 @@ RollingUpgradeInfo queryRollingUpgrade() throws IOException { RollingUpgradeInfo startRollingUpgrade() throws IOException { final String operationName = "startRollingUpgrade"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7512,7 +7513,7 @@ void checkRollingUpgrade(String action) throws RollingUpgradeException { RollingUpgradeInfo finalizeRollingUpgrade() throws IOException { final String operationName = "finalizeRollingUpgrade"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7667,7 +7668,7 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolInfoStr = null; try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, null); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7694,7 +7695,7 @@ void modifyCachePool(CachePoolInfo req, boolean logRetryCache) String poolNameStr = "{poolName: " + (req == null ? 
null : req.getPoolName()) + "}"; try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, null); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7721,7 +7722,7 @@ void removeCachePool(String cachePoolName, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolNameStr = "{poolName: " + cachePoolName + "}"; try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, null); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7926,8 +7927,7 @@ void createEncryptionZone(final String src, final String keyName, Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src); final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, src); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7988,9 +7988,7 @@ BatchedListEntries listEncryptionZones(long prevId) final String operationName = "listEncryptionZones"; boolean success = false; checkOperation(OperationCategory.READ); - final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, null); readLock(); try { checkOperation(OperationCategory.READ); @@ -8006,12 +8004,13 @@ BatchedListEntries listEncryptionZones(long prevId) void reencryptEncryptionZone(final String zone, final ReencryptAction action, final boolean logRetryCache) throws IOException { + final String operationName = "reencryptEncryptionZone"; boolean success = false; try { Preconditions.checkNotNull(zone, "zone is null."); checkOperation(OperationCategory.WRITE); final FSPermissionChecker pc = dir.getPermissionChecker(); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, zone); checkNameNodeSafeMode("NameNode in safemode, cannot " + action + " re-encryption on zone " + zone); reencryptEncryptionZoneInt(pc, zone, action, logRetryCache); @@ -8026,9 +8025,7 @@ BatchedListEntries listReencryptionStatus( final String operationName = "listReencryptionStatus"; boolean success = false; checkOperation(OperationCategory.READ); - final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, null); readLock(); try { checkOperation(OperationCategory.READ); @@ -8309,7 +8306,7 @@ void unsetErasureCodingPolicy(final String srcArg, public ECTopologyVerifierResult getECTopologyResultForPolicies( String[] policyNames) throws IOException { String operationName = "getECTopologyResultForPolicies"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, null); checkOperation(OperationCategory.UNCHECKED); ECTopologyVerifierResult result; readLock(); @@ -8871,12 +8868,13 @@ private ECTopologyVerifierResult getEcTopologyVerifierResultForEnabledPolicies() Arrays.asList(enabledEcPolicies)); } - // This method logs operatoinName without super user privilege. + // This method logs operationName without super user privilege. // It should be called without holding FSN lock. 
- void checkSuperuserPrivilege(String operationName) + void checkSuperuserPrivilege(String operationName, String path) throws IOException { try { - checkSuperuserPrivilege(); + FSPermissionChecker.setOperationType(operationName); + checkSuperuserPrivilege(path); } catch (AccessControlException ace) { logAuditEvent(false, operationName, null); throw ace; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 3f80952a8e9af..b4ba65bbc6223 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -144,18 +145,39 @@ private AccessControlEnforcer getAccessControlEnforcer() { ? attributeProvider.getExternalAccessControlEnforcer(this) : this; } + private AuthorizationContext getAuthorizationContextForSuperUser( + String path) { + String opType = operationType.get(); + + AuthorizationContext.Builder builder = + new INodeAttributeProvider.AuthorizationContext.Builder(); + builder.fsOwner(fsOwner). + supergroup(supergroup). + callerUgi(callerUgi). + operationName(opType). + callerContext(CallerContext.getCurrent()); + + // Add path to the context builder only if it is not null. + if (path != null && !path.isEmpty()) { + builder.path(path); + } + + return builder.build(); + } + /** - * Verify if the caller has the required permission. This will result into - * an exception if the caller is not allowed to access the resource. + * Checks if the caller has super user privileges. + * Throws {@link AccessControlException} for non super users. + * + * @param path The resource path for which permission is being requested. + * @throws AccessControlException if the caller is not a super user. */ - public void checkSuperuserPrivilege() + public void checkSuperuserPrivilege(String path) throws AccessControlException { - if (!isSuperUser()) { - throw new AccessControlException("Access denied for user " - + getUser() + ". Superuser privilege is required"); - } + getAccessControlEnforcer().checkSuperUserPermissionWithContext( + getAuthorizationContextForSuperUser(path)); } - + /** * Check whether current user have permissions to access the path. * Traverse is always checked. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index e83c962a4a845..ec5a847bdffb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -362,7 +362,7 @@ public interface AccessControlEnforcer { * Checks permission on a file system object. 
Has to throw an Exception * if the filesystem object is not accessible by the calling Ugi. * @param fsOwner Filesystem owner (The Namenode user) - * @param supergroup super user geoup + * @param supergroup super user group * @param callerUgi UserGroupInformation of the caller * @param inodeAttrs Array of INode attributes for each path element in the * the path @@ -393,7 +393,7 @@ public abstract void checkPermission(String fsOwner, String supergroup, /** * Checks permission on a file system object. Has to throw an Exception - * if the filesystem object is not accessessible by the calling Ugi. + * if the filesystem object is not accessible by the calling Ugi. * @param authzContext an {@link AuthorizationContext} object encapsulating * the various parameters required to authorize an * operation. @@ -405,6 +405,30 @@ default void checkPermissionWithContext(AuthorizationContext authzContext) + "implement the checkPermissionWithContext(AuthorizationContext) " + "API."); } + + /** + * Checks if the user belongs to superuser group. + * It throws an AccessControlException if user is not a superuser. + * + * @param authzContext an {@link AuthorizationContext} object encapsulating + * the various parameters required to authorize an + * operation. + * @throws AccessControlException - if user is not a super user or part + * of the super user group. + */ + default void checkSuperUserPermissionWithContext ( + AuthorizationContext authzContext) + throws AccessControlException { + UserGroupInformation callerUgi = authzContext.getCallerUgi(); + boolean isSuperUser = + callerUgi.getShortUserName().equals(authzContext.getFsOwner()) || + callerUgi.getGroupsSet().contains(authzContext.getSupergroup()); + if (!isSuperUser) { + throw new AccessControlException("Access denied for user " + + callerUgi.getShortUserName() + ". Superuser privilege is " + + "required for operation " + authzContext.getOperationName()); + } + } } /** * Initialize the provider. 
This method is called at NameNode startup diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index aee4f68bdc7f2..4d32dbaa804a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2599,7 +2599,7 @@ public void disableErasureCodingPolicy(String ecPolicyName) public void startReconfiguration() throws IOException { checkNNStartup(); String operationName = "startNamenodeReconfiguration"; - namesystem.checkSuperuserPrivilege(operationName); + namesystem.checkSuperuserPrivilege(operationName, null); nn.startReconfigurationTask(); namesystem.logAuditEvent(true, operationName, null); } @@ -2609,7 +2609,7 @@ public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException { checkNNStartup(); String operationName = "getNamenodeReconfigurationStatus"; - namesystem.checkSuperuserPrivilege(operationName); + namesystem.checkSuperuserPrivilege(operationName, null); ReconfigurationTaskStatus status = nn.getReconfigurationTaskStatus(); namesystem.logAuditEvent(true, operationName, null); return status; @@ -2619,7 +2619,7 @@ public ReconfigurationTaskStatus getReconfigurationStatus() public List listReconfigurableProperties() throws IOException { checkNNStartup(); String operationName = "listNamenodeReconfigurableProperties"; - namesystem.checkSuperuserPrivilege(operationName); + namesystem.checkSuperuserPrivilege(operationName, null); List result = Lists.newArrayList(nn.getReconfigurableProperties()); namesystem.logAuditEvent(true, operationName, null); return result; @@ -2629,7 +2629,7 @@ public List listReconfigurableProperties() throws IOException { public Long getNextSPSPath() throws IOException { checkNNStartup(); String operationName = "getNextSPSPath"; - namesystem.checkSuperuserPrivilege(operationName); + namesystem.checkSuperuserPrivilege(operationName, null); if (nn.isStandbyState()) { throw new StandbyException("Not supported by Standby Namenode."); } From 41d0bb6a9b7686412ebf115bc4b9e763cfb2e58a Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Wed, 17 Mar 2021 18:12:58 -0700 Subject: [PATCH 02/16] Add operation names for all the operations. 
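
Every hunk in this patch repeats one shape: give the RPC handler an explicit
operation name, pass that name into the superuser check so an external
enforcer can attribute the call, and audit a failed check under the same name
before rethrowing. The following minimal sketch shows that shape; the
Enforcer and AuditLog interfaces are hypothetical stand-ins for the real
AccessControlEnforcer and FSNamesystem audit logger, not Hadoop APIs.

import java.security.AccessControlException;

class SuperuserCheckPattern {
  /** Hypothetical stand-in for INodeAttributeProvider.AccessControlEnforcer. */
  interface Enforcer {
    void checkSuperUser(String operationName, String path)
        throws AccessControlException;
  }

  /** Hypothetical stand-in for the FSNamesystem audit logger. */
  interface AuditLog {
    void log(boolean allowed, String operationName, String path);
  }

  private final Enforcer enforcer;
  private final AuditLog audit;

  SuperuserCheckPattern(Enforcer enforcer, AuditLog audit) {
    this.enforcer = enforcer;
    this.audit = audit;
  }

  /**
   * Sketch of the checkSuperuserPrivilege(operationName, path) flow in this
   * series: delegate to the enforcer, audit a denial under the operation
   * name, then rethrow.
   */
  void checkSuperuserPrivilege(String operationName, String path)
      throws AccessControlException {
    try {
      enforcer.checkSuperUser(operationName, path);
    } catch (AccessControlException ace) {
      audit.log(false, operationName, path);
      throw ace;
    }
  }
}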
--- .../router/RouterPermissionChecker.java | 1 + .../hdfs/server/namenode/FSDirAttrOp.java | 21 ++---- .../hdfs/server/namenode/FSNamesystem.java | 67 ++++++++++--------- .../server/namenode/FSPermissionChecker.java | 11 +++ .../hadoop/hdfs/server/namenode/NameNode.java | 24 +++---- .../server/namenode/NameNodeRpcServer.java | 59 ++++++++++------ .../hdfs/server/namenode/NamenodeFsck.java | 3 +- 7 files changed, 108 insertions(+), 78 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java index ac796ff823420..23a3c6e759e7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java @@ -103,6 +103,7 @@ public void checkPermission(MountTable mountTable, FsAction access) * based on Datanode#checkSuperuserPrivilege(). * @throws AccessControlException If the user is not authorized. */ + @Override public void checkSuperuserPrivilege() throws AccessControlException { // Try to get the ugi in the RPC call. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index b689305530fcf..4b2ca613a3d1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -247,20 +247,13 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src, fsd.writeLock(); try { INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE); - if (fsd.isPermissionEnabled()) { - if (allowOwner && !pc.isSuperUser()) { - try { - fsd.checkOwner(pc, iip.getParentINodesInPath()); - } catch(AccessControlException ace) { - throw new AccessControlException( - "Access denied for user " + pc.getUser() + - ". Superuser or owner of parent folder privilege" + - " is required"); - } - } else { - // At this point, it must be a super user. - // Call the external enforcer for audit. - pc.checkSuperuserPrivilege(iip.getPath()); + if (fsd.isPermissionEnabled() && !pc.isSuperUser() && allowOwner) { + try { + fsd.checkOwner(pc, iip.getParentINodesInPath()); + } catch(AccessControlException ace) { + throw new AccessControlException( + "Access denied for user " + pc.getUser() + + ". 
Superuser or owner of parent folder privilege is required"); } } INodeDirectory changed = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 15eda598c50bc..6661509e4f7a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1907,7 +1907,7 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long */ void metaSave(String filename) throws IOException { String operationName = "metaSave"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.READ); readLock(); try { @@ -4860,7 +4860,7 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type) throws IOException { String operationName = "datanodeReport"; DatanodeInfo[] arr; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.UNCHECKED); readLock(); try { @@ -4884,7 +4884,7 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type ) throws IOException { String operationName = "getDatanodeStorageReport"; DatanodeStorageReport[] reports; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.UNCHECKED); readLock(); try { @@ -4907,7 +4907,7 @@ boolean saveNamespace(final long timeWindow, final long txGap) throws IOException { String operationName = "saveNamespace"; checkOperation(OperationCategory.UNCHECKED); - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); boolean saved = false; cpLock(); // Block if a checkpointing is in progress on standby. @@ -4940,7 +4940,7 @@ boolean saveNamespace(final long timeWindow, final long txGap) boolean restoreFailedStorage(String arg) throws IOException { String operationName = getFailedStorageCommand(arg); boolean val = false; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. writeLock(); @@ -4968,7 +4968,7 @@ Date getStartTime() { void finalizeUpgrade() throws IOException { String operationName = "finalizeUpgrade"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. 
writeLock(); @@ -4985,7 +4985,7 @@ void finalizeUpgrade() throws IOException { void refreshNodes() throws IOException { String operationName = "refreshNodes"; checkOperation(OperationCategory.UNCHECKED); - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration()); logAuditEvent(true, operationName, null); } @@ -4993,7 +4993,7 @@ void refreshNodes() throws IOException { void setBalancerBandwidth(long bandwidth) throws IOException { String operationName = "setBalancerBandwidth"; checkOperation(OperationCategory.WRITE); - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); logAuditEvent(true, operationName, null); } @@ -5002,7 +5002,7 @@ boolean setSafeMode(SafeModeAction action) throws IOException { String operationName = action.toString().toLowerCase(); boolean error = false; if (action != SafeModeAction.SAFEMODE_GET) { - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); switch(action) { case SAFEMODE_LEAVE: // leave safe mode leaveSafeMode(false); @@ -5158,7 +5158,7 @@ private synchronized void setManualAndResourceLowSafeMode(boolean manual, CheckpointSignature rollEditLog() throws IOException { String operationName = "rollEditLog"; CheckpointSignature result = null; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.JOURNAL); writeLock(); try { @@ -5222,6 +5222,12 @@ PermissionStatus createFsOwnerPermissions(FsPermission permission) { return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission); } + /** + * This method is retained for backward compatibility. + * Please use {@link #checkSuperuserPrivilege(String)} instead. + * + * @throws AccessControlException if user is not a super user. 
+ */ void checkSuperuserPrivilege() throws AccessControlException { if (isPermissionEnabled) { FSPermissionChecker pc = getPermissionChecker(); @@ -5229,11 +5235,9 @@ void checkSuperuserPrivilege() throws AccessControlException { } } - void checkSuperuserPrivilege(String path) throws AccessControlException { - if (isPermissionEnabled) { - FSPermissionChecker pc = getPermissionChecker(); - pc.checkSuperuserPrivilege(path); - } + void checkSuperuserPrivilege(String operationName) + throws IOException { + checkSuperuserPrivilege(operationName, null); } /** @@ -7302,7 +7306,7 @@ void removeSnapshottableDirs(List toRemove) { RollingUpgradeInfo queryRollingUpgrade() throws IOException { final String operationName = "queryRollingUpgrade"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.READ); readLock(); try { @@ -7322,7 +7326,7 @@ RollingUpgradeInfo queryRollingUpgrade() throws IOException { RollingUpgradeInfo startRollingUpgrade() throws IOException { final String operationName = "startRollingUpgrade"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7513,7 +7517,7 @@ void checkRollingUpgrade(String action) throws RollingUpgradeException { RollingUpgradeInfo finalizeRollingUpgrade() throws IOException { final String operationName = "finalizeRollingUpgrade"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7668,7 +7672,7 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolInfoStr = null; try { - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7695,7 +7699,7 @@ void modifyCachePool(CachePoolInfo req, boolean logRetryCache) String poolNameStr = "{poolName: " + (req == null ? 
null : req.getPoolName()) + "}"; try { - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7722,7 +7726,7 @@ void removeCachePool(String cachePoolName, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolNameStr = "{poolName: " + cachePoolName + "}"; try { - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7988,7 +7992,7 @@ BatchedListEntries listEncryptionZones(long prevId) final String operationName = "listEncryptionZones"; boolean success = false; checkOperation(OperationCategory.READ); - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); readLock(); try { checkOperation(OperationCategory.READ); @@ -8025,7 +8029,7 @@ BatchedListEntries listReencryptionStatus( final String operationName = "listReencryptionStatus"; boolean success = false; checkOperation(OperationCategory.READ); - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); readLock(); try { checkOperation(OperationCategory.READ); @@ -8306,7 +8310,7 @@ void unsetErasureCodingPolicy(final String srcArg, public ECTopologyVerifierResult getECTopologyResultForPolicies( String[] policyNames) throws IOException { String operationName = "getECTopologyResultForPolicies"; - checkSuperuserPrivilege(operationName, null); + checkSuperuserPrivilege(operationName); checkOperation(OperationCategory.UNCHECKED); ECTopologyVerifierResult result; readLock(); @@ -8872,12 +8876,15 @@ private ECTopologyVerifierResult getEcTopologyVerifierResultForEnabledPolicies() // It should be called without holding FSN lock. void checkSuperuserPrivilege(String operationName, String path) throws IOException { - try { - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(path); - } catch (AccessControlException ace) { - logAuditEvent(false, operationName, null); - throw ace; + if (isPermissionEnabled) { + try { + FSPermissionChecker.setOperationType(operationName); + FSPermissionChecker pc = getPermissionChecker(); + pc.checkSuperuserPrivilege(path); + } catch(AccessControlException ace){ + logAuditEvent(false, operationName, null); + throw ace; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index b4ba65bbc6223..927766d47ac29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -165,6 +165,17 @@ private AuthorizationContext getAuthorizationContextForSuperUser( return builder.build(); } + /** + * This method is retained to maintain backward compatibility. + * Please use the new method {@link #checkSuperuserPrivilege(String)} to make + * sure that the external enforcers have the correct context to audit. + * + * @throws AccessControlException if the caller is not a super user. + */ + public void checkSuperuserPrivilege() throws AccessControlException { + checkSuperuserPrivilege(null); + } + /** * Checks if the caller has super user privileges. * Throws {@link AccessControlException} for non super users. 
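
The no-argument checkSuperuserPrivilege() kept above is a thin shim over the
new path-aware overload, so legacy callers and new callers converge on the
same enforcer invocation. A compact sketch of that delegation follows; the
class name and the isSuperUser() placeholder are assumptions for
illustration, not the actual FSPermissionChecker internals.

import java.security.AccessControlException;

class PrivilegeChecker {
  /** Legacy entry point retained for compatibility; no path is available. */
  public void checkSuperuserPrivilege() throws AccessControlException {
    checkSuperuserPrivilege(null);
  }

  /** New entry point: a non-null path reaches the authorization context. */
  public void checkSuperuserPrivilege(String path)
      throws AccessControlException {
    if (!isSuperUser()) {
      throw new AccessControlException("Access denied"
          + (path == null ? "" : " for path " + path)
          + ". Superuser privilege is required");
    }
  }

  private boolean isSuperUser() {
    // Placeholder: the real check compares the caller's UGI against the
    // filesystem owner and the configured supergroup.
    return false;
  }
}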
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 55196c4d44f03..ed3d80bbfc201 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1835,9 +1835,9 @@ public static void main(String argv[]) throws Exception { } } - synchronized void monitorHealth() - throws HealthCheckFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void monitorHealth() throws IOException { + String operationName = "monitorHealth"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { return; // no-op, if HA is not enabled } @@ -1859,9 +1859,9 @@ synchronized void monitorHealth() } } - synchronized void transitionToActive() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToActive() throws IOException { + String operationName = "transitionToActive"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } @@ -1876,18 +1876,18 @@ synchronized void transitionToActive() state.setState(haContext, ACTIVE_STATE); } - synchronized void transitionToStandby() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToStandby() throws IOException { + String operationName = "transitionToStandby"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } state.setState(haContext, STANDBY_STATE); } - synchronized void transitionToObserver() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToObserver() throws IOException { + String operationName = "transitionToObserver"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 4d32dbaa804a3..90819c28ffc3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -642,6 +642,7 @@ private static UserGroupInformation getRemoteUser() throws IOException { public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long minBlockSize, long timeInterval) throws IOException { + String operationName = "getBlocks"; if(size <= 0) { throw new IllegalArgumentException( "Unexpected not positive size: "+size); @@ -651,15 +652,16 @@ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long "Unexpected not positive size: "+size); } checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); namesystem.checkNameNodeSafeMode("Cannot execute getBlocks"); return namesystem.getBlocks(datanode, size, minBlockSize, timeInterval); } @Override // 
NamenodeProtocol public ExportedBlockKeys getBlockKeys() throws IOException { + String operationName = "getBlockKeys"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getBlockManager().getBlockKeys(); } @@ -667,9 +669,10 @@ public ExportedBlockKeys getBlockKeys() throws IOException { public void errorReport(NamenodeRegistration registration, int errorCode, String msg) throws IOException { + String operationName = "errorReport"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyRequest(registration); LOG.info("Error report from " + registration + ": " + msg); if (errorCode == FATAL) { @@ -680,8 +683,9 @@ public void errorReport(NamenodeRegistration registration, @Override // NamenodeProtocol public NamenodeRegistration registerSubordinateNamenode( NamenodeRegistration registration) throws IOException { + String operationName = "registerSubordinateNamenode"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyLayoutVersion(registration.getVersion()); NamenodeRegistration myRegistration = nn.setRegistration(); namesystem.registerBackupNode(registration, myRegistration); @@ -691,8 +695,9 @@ public NamenodeRegistration registerSubordinateNamenode( @Override // NamenodeProtocol public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException { + String operationName = "startCheckpoint"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyRequest(registration); if(!nn.isRole(NamenodeRole.NAMENODE)) throw new IOException("Only an ACTIVE node can invoke startCheckpoint."); @@ -714,8 +719,9 @@ public NamenodeCommand startCheckpoint(NamenodeRegistration registration) @Override // NamenodeProtocol public void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { + String operationName = "endCheckpoint"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; // Return previous response @@ -1322,17 +1328,19 @@ public void refreshNodes() throws IOException { @Override // NamenodeProtocol public long getTransactionID() throws IOException { + String operationName = "getTransactionID"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getCorrectLastAppliedOrWrittenTxId(); } @Override // NamenodeProtocol public long getMostRecentCheckpointTxId() throws IOException { + String operationName = "getMostRecentCheckpointTxId"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getMostRecentCheckpointTxId(); } @@ -1345,23 +1353,26 @@ public CheckpointSignature rollEditLog() throws IOException { @Override // NamenodeProtocol public RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws IOException { + String operationName = "getEditLogManifest"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); - 
namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getEditLog().getEditLogManifest(sinceTxId); } @Override // NamenodeProtocol public boolean isUpgradeFinalized() throws IOException { + String operationName = "isUpgradeFinalized"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.isUpgradeFinalized(); } @Override // NamenodeProtocol public boolean isRollingUpgrade() throws IOException { + String operationName = "isRollingUpgrade"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.isRollingUpgrade(); } @@ -2345,9 +2356,10 @@ public void checkAccess(String path, FsAction mode) throws IOException { @Override // ClientProtocol public long getCurrentEditLogTxid() throws IOException { + String operationName = "getCurrentEditLogTxid"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); // only active - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); // if it's not yet open for write, we may be in the process of transitioning // from standby to active and may not yet know what the latest committed // txid is @@ -2374,9 +2386,10 @@ private static FSEditLogOp readOp(EditLogInputStream elis) @Override // ClientProtocol public EventBatchList getEditsFromTxid(long txid) throws IOException { + String operationName = "getEditsFromTxid"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); // only active - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); int maxEventsPerRPC = nn.getConf().getInt( DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY, DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT); @@ -2521,8 +2534,9 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies( @Override public AddErasureCodingPolicyResponse[] addErasureCodingPolicies( ErasureCodingPolicy[] policies) throws IOException { + String operationName = "addErasureCodingPolicies"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null); if (cacheEntry != null && cacheEntry.isSuccess()) { @@ -2544,8 +2558,9 @@ public AddErasureCodingPolicyResponse[] addErasureCodingPolicies( @Override public void removeErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName = "removeErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; @@ -2562,8 +2577,9 @@ public void removeErasureCodingPolicy(String ecPolicyName) @Override // ClientProtocol public void enableErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName = "enableErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; @@ -2580,8 +2596,9 @@ public void enableErasureCodingPolicy(String ecPolicyName) @Override // ClientProtocol public void disableErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName 
= "disableErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; @@ -2599,7 +2616,7 @@ public void disableErasureCodingPolicy(String ecPolicyName) public void startReconfiguration() throws IOException { checkNNStartup(); String operationName = "startNamenodeReconfiguration"; - namesystem.checkSuperuserPrivilege(operationName, null); + namesystem.checkSuperuserPrivilege(operationName); nn.startReconfigurationTask(); namesystem.logAuditEvent(true, operationName, null); } @@ -2609,7 +2626,7 @@ public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException { checkNNStartup(); String operationName = "getNamenodeReconfigurationStatus"; - namesystem.checkSuperuserPrivilege(operationName, null); + namesystem.checkSuperuserPrivilege(operationName); ReconfigurationTaskStatus status = nn.getReconfigurationTaskStatus(); namesystem.logAuditEvent(true, operationName, null); return status; @@ -2619,7 +2636,7 @@ public ReconfigurationTaskStatus getReconfigurationStatus() public List listReconfigurableProperties() throws IOException { checkNNStartup(); String operationName = "listNamenodeReconfigurableProperties"; - namesystem.checkSuperuserPrivilege(operationName, null); + namesystem.checkSuperuserPrivilege(operationName); List result = Lists.newArrayList(nn.getReconfigurableProperties()); namesystem.logAuditEvent(true, operationName, null); return result; @@ -2629,7 +2646,7 @@ public List listReconfigurableProperties() throws IOException { public Long getNextSPSPath() throws IOException { checkNNStartup(); String operationName = "getNextSPSPath"; - namesystem.checkSuperuserPrivilege(operationName, null); + namesystem.checkSuperuserPrivilege(operationName); if (nn.isStandbyState()) { throw new StandbyException("Not supported by Standby Namenode."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 3ec7d61859143..9c0b381885cb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -377,9 +377,10 @@ private void printDatanodeReplicaStatus(Block block, */ public void fsck() throws AccessControlException { final long startTime = Time.monotonicNow(); + String operationName = "fsck"; try { if(blockIds != null) { - namenode.getNamesystem().checkSuperuserPrivilege(); + namenode.getNamesystem().checkSuperuserPrivilege(operationName); StringBuilder sb = new StringBuilder(); sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + From c5fef7c3626901829dca5c4b34f5ee69345f64ba Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Thu, 18 Mar 2021 16:04:44 -0700 Subject: [PATCH 03/16] Fix checkstyle issues. 
---
 .../hadoop/hdfs/server/namenode/FSDirAttrOp.java     |  2 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java     | 12 ++++++------
 .../hdfs/server/namenode/INodeAttributeProvider.java |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 4b2ca613a3d1e..958bbfcb471ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -253,7 +253,7 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src,
       } catch(AccessControlException ace) {
         throw new AccessControlException(
             "Access denied for user " + pc.getUser() +
-            ". Superuser or owner of parent folder privilege is required");
+                ". Superuser or owner of parent folder privilege is required");
       }
     }
     INodeDirectory changed =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9621a57de8fec..7373e54e83962 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -721,12 +721,12 @@ public INodesInPath resolvePath(FSPermissionChecker pc, String src,
     INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw);
     if (isPermissionEnabled && pc != null && isRaw) {
       switch(dirOp) {
-        case READ_LINK:
-        case READ:
-          break;
-        default:
-          pc.checkSuperuserPrivilege(iip.getPath());
-          break;
+      case READ_LINK:
+      case READ:
+        break;
+      default:
+        pc.checkSuperuserPrivilege(iip.getPath());
+        break;
       }
     }
     // verify all ancestors are dirs and traversable. note that only
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
index ec5a847bdffb1..96b56ef3980fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
@@ -416,7 +416,7 @@ default void checkPermissionWithContext(AuthorizationContext authzContext)
      * @throws AccessControlException - if user is not a super user or part
      * of the super user group.
*/ - default void checkSuperUserPermissionWithContext ( + default void checkSuperUserPermissionWithContext( AuthorizationContext authzContext) throws AccessControlException { UserGroupInformation callerUgi = authzContext.getCallerUgi(); From 5cf366cdf8d5b5ef2c8315aed3acea242eaf0bb8 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Sat, 20 Mar 2021 13:45:49 -0700 Subject: [PATCH 04/16] Address review comments --- .../hadoop/hdfs/server/namenode/FSDirAttrOp.java | 8 ++++++-- .../hadoop/hdfs/server/namenode/FSNamesystem.java | 12 ++++++------ .../hdfs/server/namenode/INodeAttributeProvider.java | 2 +- .../hadoop/hdfs/server/namenode/NamenodeFsck.java | 2 +- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 958bbfcb471ce..b1c22cb45bc40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -84,8 +84,10 @@ static FileStatus setOwner( iip = fsd.resolvePath(pc, src, DirOp.WRITE); // Only the owner or super user can change the group or owner fsd.checkOwner(pc, iip); - // Only a super user can change ownership to a different user - // or group to a different group that the user doesn't belong to + // superuser: can change owner to a different user, + // change owner group to any group + // owner: can't change owner to a different user but can change owner + // group to different group that the user belongs to. if ((username != null && !pc.getUser().equals(username)) || (group != null && !pc.isMemberOfGroup(group))) { try { @@ -247,6 +249,8 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src, fsd.writeLock(); try { INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE); + // Here, the assumption is that the caller of this method has + // already checked for super user privilege if (fsd.isPermissionEnabled() && !pc.isSuperUser() && allowOwner) { try { fsd.checkOwner(pc, iip.getParentINodesInPath()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6661509e4f7a3..2116243b396c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2415,7 +2415,7 @@ private void checkStoragePolicyEnabled(final String operationNameReadable, } if (checkSuperUser && isStoragePolicySuperuserOnly) { checkSuperuserPrivilege( - CaseUtils.toCamelCase(operationNameReadable, false), null); + CaseUtils.toCamelCase(operationNameReadable, false)); } } @@ -7699,7 +7699,7 @@ void modifyCachePool(CachePoolInfo req, boolean logRetryCache) String poolNameStr = "{poolName: " + (req == null ? 
null : req.getPoolName()) + "}"; try { - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, poolNameStr); writeLock(); try { checkOperation(OperationCategory.WRITE);
@@ -7726,7 +7726,7 @@ void removeCachePool(String cachePoolName, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolNameStr = "{poolName: " + cachePoolName + "}"; try { - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, poolNameStr); writeLock(); try { checkOperation(OperationCategory.WRITE);
@@ -7992,7 +7992,7 @@ BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId) final String operationName = "listEncryptionZones"; boolean success = false; checkOperation(OperationCategory.READ); - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, dir.rootDir.getFullPathName()); readLock(); try { checkOperation(OperationCategory.READ);
@@ -8029,7 +8029,7 @@ BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus( final String operationName = "listReencryptionStatus"; boolean success = false; checkOperation(OperationCategory.READ); - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, dir.rootDir.getFullPathName()); readLock(); try { checkOperation(OperationCategory.READ);
@@ -8882,7 +8882,7 @@ void checkSuperuserPrivilege(String operationName, String path) FSPermissionChecker pc = getPermissionChecker(); pc.checkSuperuserPrivilege(path); } catch(AccessControlException ace){ - logAuditEvent(false, operationName, null); + logAuditEvent(false, operationName, path); throw ace; } }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 96b56ef3980fe..e3803b92e5733 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
@@ -407,7 +407,7 @@ default void checkPermissionWithContext(AuthorizationContext authzContext) } /** - * Checks if the user belongs to superuser group. + * Checks if the user is a superuser or belongs to the superuser group. * It throws an AccessControlException if user is not a superuser.
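* <p>
* For orientation, the call path into this hook (a simplified sketch of
* the FSPermissionChecker code appearing later in this series):
* <pre>{@code
* // FSPermissionChecker#checkSuperuserPrivilege(String), simplified
* public void checkSuperuserPrivilege(String path)
*     throws AccessControlException {
*   getAccessControlEnforcer().checkSuperUserPermissionWithContext(
*       getAuthorizationContextForSuperUser(path));
* }
* }</pre>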
* * @param authzContext an {@link AuthorizationContext} object encapsulating diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 9c0b381885cb1..a39648979c939 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -380,7 +380,7 @@ public void fsck() throws AccessControlException { String operationName = "fsck"; try { if(blockIds != null) { - namenode.getNamesystem().checkSuperuserPrivilege(operationName); + namenode.getNamesystem().checkSuperuserPrivilege(operationName, path); StringBuilder sb = new StringBuilder(); sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + From 4529aea0ceb5dfcb737bc4bb398f8559a72de5e5 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Mon, 22 Mar 2021 14:21:29 -0700 Subject: [PATCH 05/16] Address review comments --- .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java | 2 +- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index b1c22cb45bc40..5914d7449619c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -82,8 +82,8 @@ static FileStatus setOwner( fsd.writeLock(); try { iip = fsd.resolvePath(pc, src, DirOp.WRITE); - // Only the owner or super user can change the group or owner fsd.checkOwner(pc, iip); + // At this point, the user must be either owner or super user. 
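// (illustrative example of the rules restated below, not part of the
// patch: a user "alice" in groups {staff, dev} may chgrp her own file
// from staff to dev, but only a superuser may chown it to "bob" or
// chgrp it to a group alice is not a member of)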
// superuser: can change owner to a different user, // change owner group to any group // owner: can't change owner to a different user but can change owner diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2116243b396c5..c64ef4c15f5e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7672,7 +7672,7 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolInfoStr = null; try { - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, req.getPoolName()); writeLock(); try { checkOperation(OperationCategory.WRITE); From 3eac2b962f049bba21ea4a166f1502ed50104c24 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Mon, 22 Mar 2021 16:43:25 -0700 Subject: [PATCH 06/16] Add null check --- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c64ef4c15f5e0..d9eb1c6b79047 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7671,8 +7671,9 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) final String operationName = "addCachePool"; checkOperation(OperationCategory.WRITE); String poolInfoStr = null; + String poolName = req == null ? 
null : req.getPoolName(); try { - checkSuperuserPrivilege(operationName, req.getPoolName()); + checkSuperuserPrivilege(operationName, poolName); writeLock(); try { checkOperation(OperationCategory.WRITE); From 9f643eac7c9c0cfd8d383b67dc2962f9ccf23aab Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 23 Mar 2021 16:13:32 -0700 Subject: [PATCH 07/16] Address review comments --- .../ContentSummaryComputationContext.java | 9 +++++++-- .../hdfs/server/namenode/FSDirXAttrOp.java | 5 ++++- .../hdfs/server/namenode/FSDirectory.java | 5 ++++- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../server/namenode/FSPermissionChecker.java | 9 +++++++++ .../namenode/INodeAttributeProvider.java | 19 +++++++++++++++++++ .../namenode/XAttrPermissionFilter.java | 8 ++++++++ 7 files changed, 52 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java index 7a5963a6c57cd..e304baf652843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java @@ -205,8 +205,13 @@ public String getErasureCodingPolicyName(INode inode) { void checkPermission(INodeDirectory inode, int snapshotId, FsAction access) throws AccessControlException { if (dir != null && dir.isPermissionEnabled() - && pc != null && !pc.isSuperUser()) { - pc.checkPermission(inode, snapshotId, access); + && pc != null) { + if (pc.isSuperUser()) { + // call external enforcer for audit + pc.checkSuperuserPrivilege(inode.getFullPathName()); + } else { + pc.checkPermission(inode, snapshotId, access); + } } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index ef83fe982b24c..ce79321f96801 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -437,7 +437,10 @@ private static void checkXAttrChangeAccess( if (inode != null && inode.isDirectory() && inode.getFsPermission().getStickyBit()) { - if (!pc.isSuperUser()) { + if (pc.isSuperUser()) { + // call external enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } else { fsd.checkOwner(pc, iip); } } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 7373e54e83962..d62fd98bbb033 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1943,7 +1943,7 @@ void checkPermission(FSPermissionChecker pc, INodesInPath iip, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { if (pc.isSuperUser()) { - // call the enforcer for audit + // call the external enforcer for audit pc.checkSuperuserPrivilege(iip.getPath()); } else { 
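// non-superusers fall through to the regular permission check below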
readLock(); @@ -1961,9 +1961,12 @@ void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip) if (pc.isSuperUser()) { if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip, SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) { + pc.denySuperUserAccess(iip.getPath()); throw new AccessControlException( "Access is denied for " + pc.getUser() + " since the superuser " + "is not allowed to perform this operation."); + } else { + pc.checkSuperuserPrivilege(iip.getPath()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d9eb1c6b79047..007fc08c8334a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7678,7 +7678,7 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot add cache pool" - + (req == null ? null : req.getPoolName())); + + poolName); CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req, logRetryCache); poolInfoStr = info.toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 927766d47ac29..fcbad3dc322f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -189,6 +189,11 @@ public void checkSuperuserPrivilege(String path) getAuthorizationContextForSuperUser(path)); } + public void denySuperUserAccess(String path) throws AccessControlException { + getAccessControlEnforcer().denySuperUserAccess( + getAuthorizationContextForSuperUser(path)); + } + /** * Check whether current user have permissions to access the path. * Traverse is always checked. @@ -738,6 +743,10 @@ static void checkTraverse(FSPermissionChecker pc, INodesInPath iip, UnresolvedPathException, ParentNotDirectoryException { try { if (pc == null || pc.isSuperUser()) { + if (pc != null) { + // call the external enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } checkSimpleTraverse(iip); } else { pc.checkPermission(iip, false, null, null, null, null, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index e3803b92e5733..94896e0b815e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -429,7 +429,26 @@ default void checkSuperUserPermissionWithContext( "required for operation " + authzContext.getOperationName()); } } + + /** + * This method must be called when denying access to super users to + * notify the external enforcers. + * This will help the external enforcers to audit the requests + * by super users that were denied access. 
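+ * <p>
+ * A sketch (an assumption, not part of this patch) of an enforcer
+ * override that records the denial; {@code auditDenied} is a
+ * hypothetical helper:
+ * <pre>{@code
+ * @Override
+ * public void denySuperUserAccess(AuthorizationContext ctx)
+ *     throws AccessControlException {
+ *   // record the denied superuser request for the external audit trail
+ *   // (the NameNode throws the AccessControlException after this returns)
+ *   auditDenied(ctx.getCallerUgi(), ctx.getOperationName());
+ * }
+ * }</pre>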
+ * @param authzContext an {@link AuthorizationContext} object encapsulating + * the various parameters required to authorize an + * operation. + * @throws AccessControlException + */ + default void denySuperUserAccess(AuthorizationContext authzContext) + throws AccessControlException { + throw new AccessControlException("The authorization provider does not " + + "implement the denySuperUserAccess(AuthorizationContext) " + + "API."); + + } } + /** * Initialize the provider. This method is called at NameNode startup * time. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 92e5ef1a0b86d..b6dbb59c599e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -65,8 +65,14 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, boolean isRawPath) throws AccessControlException { final boolean isSuperUser = pc.isSuperUser(); + final String xAttrString = + "XAttr [ns=" + xAttr.getNameSpace() + ", name=" + xAttr.getName() + "]"; if (xAttr.getNameSpace() == XAttr.NameSpace.USER || (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) { + if (isSuperUser) { + // call the external enforcer for audit. + pc.checkSuperuserPrivilege(xAttrString); + } return; } if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) { @@ -75,6 +81,8 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, if (XAttrHelper.getPrefixedName(xAttr). equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { if (xAttr.getValue() != null) { + // Notify external enforcer for audit + pc.denySuperUserAccess(xAttrString); throw new AccessControlException("Attempt to set a value for '" + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "'. 
Values are not allowed for this xattr."); From 9339987874597ba62b3ae2a0a94a254a2c1eb9c3 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Thu, 25 Mar 2021 13:00:59 -0700 Subject: [PATCH 08/16] Fix unit test failures --- .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 5 +++-- .../hadoop/hdfs/server/namenode/FSPermissionChecker.java | 2 +- .../hdfs/server/namenode/INodeAttributeProvider.java | 7 +------ 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 007fc08c8334a..f300f18e5e651 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2289,6 +2289,7 @@ boolean truncate(String src, long newLength, String clientName, final String operationName = "truncate"; requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE); FSDirTruncateOp.TruncateResult r = null; + FileStatus status; try { NameNode.stateChangeLog.info( "DIR* NameSystem.truncate: src={} newLength={}", src, newLength); @@ -2307,7 +2308,7 @@ boolean truncate(String src, long newLength, String clientName, r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc); } finally { - FileStatus status = r != null ? r.getFileStatus() : null; + status = r != null ? r.getFileStatus() : null; writeUnlock(operationName, getLockReportInfoSupplier(src, null, status)); } @@ -2316,7 +2317,7 @@ boolean truncate(String src, long newLength, String clientName, removeBlocks(toRemoveBlocks); toRemoveBlocks.clear(); } - logAuditEvent(true, operationName, src, null, r.getFileStatus()); + logAuditEvent(true, operationName, src, null, status); } catch (AccessControlException e) { logAuditEvent(false, operationName, src); throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index fcbad3dc322f6..842cdbe2776c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -189,7 +189,7 @@ public void checkSuperuserPrivilege(String path) getAuthorizationContextForSuperUser(path)); } - public void denySuperUserAccess(String path) throws AccessControlException { + public void denySuperUserAccess(String path) { getAccessControlEnforcer().denySuperUserAccess( getAuthorizationContextForSuperUser(path)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 94896e0b815e7..380f77c3aba12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -438,13 +438,8 @@ default void checkSuperUserPermissionWithContext( * @param authzContext an {@link AuthorizationContext} object 
encapsulating * the various parameters required to authorize an * operation. - * @throws AccessControlException */ - default void denySuperUserAccess(AuthorizationContext authzContext) - throws AccessControlException { - throw new AccessControlException("The authorization provider does not " - + "implement the denySuperUserAccess(AuthorizationContext) " - + "API."); + default void denySuperUserAccess(AuthorizationContext authzContext) { } } From 303fb73751c344fb03fb4ebe349cd2223c4864ab Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Thu, 25 Mar 2021 18:23:16 -0700 Subject: [PATCH 09/16] Fix method name and signature in AccessControlEnforcer --- .../hadoop/hdfs/server/namenode/FSDirectory.java | 8 ++++---- .../hdfs/server/namenode/FSPermissionChecker.java | 15 ++++++++++++--- .../server/namenode/INodeAttributeProvider.java | 11 +++++++---- .../server/namenode/XAttrPermissionFilter.java | 10 +++++----- 4 files changed, 28 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index d62fd98bbb033..497aa84e767a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1961,11 +1961,11 @@ void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip) if (pc.isSuperUser()) { if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip, SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) { - pc.denySuperUserAccess(iip.getPath()); - throw new AccessControlException( - "Access is denied for " + pc.getUser() + " since the superuser " - + "is not allowed to perform this operation."); + String errorMessage = "Access is denied for " + pc.getUser() + + " since the superuser is not allowed to perform this operation."; + pc.denyUserAccess(iip.getPath(), errorMessage); } else { + // call the external enforcer for audit. pc.checkSuperuserPrivilege(iip.getPath()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 842cdbe2776c6..87341eb17e3e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -189,9 +189,18 @@ public void checkSuperuserPrivilege(String path) getAuthorizationContextForSuperUser(path)); } - public void denySuperUserAccess(String path) { - getAccessControlEnforcer().denySuperUserAccess( - getAuthorizationContextForSuperUser(path)); + /** + * Calls the external enforcer to notify denial of access to the user with + * the given error message. Always throws an ACE with the given message. + * + * @param path The resource path for which permission is being requested. + * @param errorMessage message for the exception. + * @throws AccessControlException with the error message. 
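+ * <p>
+ * Illustrative call site, mirroring the checkUnreadableBySuperuser
+ * change elsewhere in this patch ({@code isMarkedUnreadableBySuperuser}
+ * stands in for the xattr lookup):
+ * <pre>{@code
+ * if (pc.isSuperUser() && isMarkedUnreadableBySuperuser) {
+ *   pc.denyUserAccess(iip.getPath(), "Access is denied for "
+ *       + pc.getUser() + " since the superuser is not allowed to"
+ *       + " perform this operation.");
+ * }
+ * }</pre>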
+ */ + public void denyUserAccess(String path, String errorMessage) + throws AccessControlException { + getAccessControlEnforcer().denyUserAccess( + getAuthorizationContextForSuperUser(path), errorMessage); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 380f77c3aba12..f5361adc4cff4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -431,16 +431,19 @@ default void checkSuperUserPermissionWithContext( } /** - * This method must be called when denying access to super users to + * This method must be called when denying access to users to * notify the external enforcers. * This will help the external enforcers to audit the requests - * by super users that were denied access. + * by users that were denied access. * @param authzContext an {@link AuthorizationContext} object encapsulating * the various parameters required to authorize an * operation. + * @throws AccessControlException */ - default void denySuperUserAccess(AuthorizationContext authzContext) { - + default void denyUserAccess(AuthorizationContext authzContext, + String errorMessage) + throws AccessControlException { + throw new AccessControlException(errorMessage); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index b6dbb59c599e8..2d3adb2975324 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -82,15 +82,15 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { if (xAttr.getValue() != null) { // Notify external enforcer for audit - pc.denySuperUserAccess(xAttrString); - throw new AccessControlException("Attempt to set a value for '" + + String errorMessage = "Attempt to set a value for '" + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + - "'. Values are not allowed for this xattr."); + "'. 
Values are not allowed for this xattr."; + pc.denyUserAccess(xAttrString, errorMessage); } return; } - throw new AccessControlException("User doesn't have permission for xattr: " - + XAttrHelper.getPrefixedName(xAttr)); + pc.denyUserAccess(xAttrString, "User doesn't have permission for xattr: " + + XAttrHelper.getPrefixedName(xAttr)); } static void checkPermissionForApi(FSPermissionChecker pc, From 4a2f52aa61b4152782e474ea55ddd832510b4ef6 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Mon, 29 Mar 2021 15:49:31 -0700 Subject: [PATCH 10/16] Add suppression for spot bug warning --- .../hadoop-hdfs/dev-support/findbugsExcludeFile.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 5c2df9acf4e87..2095b5ce812d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -310,4 +310,10 @@ + + + + + + From 9c6436f796e549933c2a000d44b697339ead877e Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 30 Mar 2021 11:56:06 -0700 Subject: [PATCH 11/16] Fix spot bug warning --- .../dev-support/findbugsExcludeFile.xml | 72 +++++++++---------- .../hdfs/server/namenode/FSNamesystem.java | 1 + 2 files changed, 34 insertions(+), 39 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 2095b5ce812d4..0fd9464441dbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -258,49 +258,49 @@ - - - + + + - - + + - - + + - - + + - - - + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - @@ -310,10 +310,4 @@ - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f300f18e5e651..e559515696d66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2322,6 +2322,7 @@ boolean truncate(String src, long newLength, String clientName, logAuditEvent(false, operationName, src); throw e; } + assert(r != null); return r.getResult(); } From 0fa5af4fec6006b5d74e75b834627f119ddff598 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Wed, 31 Mar 2021 09:07:56 -0700 Subject: [PATCH 12/16] Revert findbugsExcludeFile formatting changes --- .../dev-support/findbugsExcludeFile.xml | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 0fd9464441dbe..5c2df9acf4e87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -258,49 +258,49 @@ - - - + + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + From 2bd481afacf0ff6b37d8f7c40ee241be81c8aefd Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: 
Wed, 31 Mar 2021 16:57:21 -0700 Subject: [PATCH 13/16] Reorder checkUnreadableBySuperUser to avoid multiple audits --- .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 8529a3811e57d..3b706837bfd0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -156,8 +156,8 @@ static GetBlockLocationsResult getBlockLocations( src = iip.getPath(); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (fsd.isPermissionEnabled()) { - fsd.checkPathAccess(pc, iip, FsAction.READ); fsd.checkUnreadableBySuperuser(pc, iip); + fsd.checkPathAccess(pc, iip, FsAction.READ); } final long fileSize = iip.isSnapshot() From e1cf2e8caa0da4ec7d6b16c78607c684a8e643b1 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Thu, 1 Apr 2021 18:12:07 -0700 Subject: [PATCH 14/16] Add debug logging to new methods and avoid multiple audits in getBlockLocations call --- .../hdfs/server/namenode/FSDirStatAndListingOp.java | 8 +++++++- .../hdfs/server/namenode/FSPermissionChecker.java | 10 ++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 3b706837bfd0f..7d6c98f77e9a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -152,7 +152,13 @@ static GetBlockLocationsResult getBlockLocations( BlockManager bm = fsd.getBlockManager(); fsd.readLock(); try { - final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ); + final INodesInPath iip; + if (pc.isSuperUser()) { + // Pass null to PC for superUsers to avoid multiple audits + iip = fsd.resolvePath(null, src, DirOp.READ); + } else { + iip = fsd.resolvePath(pc, src, DirOp.READ); + } src = iip.getPath(); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (fsd.isPermissionEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 87341eb17e3e4..e8e292761d40c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -185,6 +185,11 @@ public void checkSuperuserPrivilege() throws AccessControlException { */ public void checkSuperuserPrivilege(String path) throws AccessControlException { + if (LOG.isDebugEnabled()) { + LOG.debug("SUPERUSER ACCESS CHECK: " + this + + ", operationName=" + FSPermissionChecker.operationType.get() + + ", path=" + path); + } getAccessControlEnforcer().checkSuperUserPermissionWithContext( 
getAuthorizationContextForSuperUser(path)); } @@ -199,6 +204,11 @@ public void checkSuperuserPrivilege(String path) */ public void denyUserAccess(String path, String errorMessage) throws AccessControlException { + if (LOG.isDebugEnabled()) { + LOG.debug("DENY USER ACCESS: " + this + + ", operationName=" + FSPermissionChecker.operationType.get() + + ", path=" + path); + } getAccessControlEnforcer().denyUserAccess( getAuthorizationContextForSuperUser(path), errorMessage); } From 1c6a130eb3ad6c9806cca119e9212358db687963 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 6 Apr 2021 14:33:23 -0700 Subject: [PATCH 15/16] Remove unnecessary access check in getBlockLocations() --- .../hdfs/server/namenode/FSDirStatAndListingOp.java | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 7d6c98f77e9a4..a1e16b361468c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -152,13 +152,8 @@ static GetBlockLocationsResult getBlockLocations( BlockManager bm = fsd.getBlockManager(); fsd.readLock(); try { - final INodesInPath iip; - if (pc.isSuperUser()) { - // Pass null to PC for superUsers to avoid multiple audits - iip = fsd.resolvePath(null, src, DirOp.READ); - } else { - iip = fsd.resolvePath(pc, src, DirOp.READ); - } + // Just get INodesInPath since we check for path access later + final INodesInPath iip = fsd.getINodesInPath(src, DirOp.READ); src = iip.getPath(); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (fsd.isPermissionEnabled()) { From 7cddb2517e2d317fc2cfc2bd8be55c20e8e5b275 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Wed, 7 Apr 2021 15:59:27 -0700 Subject: [PATCH 16/16] Fix unit test failures --- .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index a1e16b361468c..8aff179358896 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -152,8 +152,9 @@ static GetBlockLocationsResult getBlockLocations( BlockManager bm = fsd.getBlockManager(); fsd.readLock(); try { - // Just get INodesInPath since we check for path access later - final INodesInPath iip = fsd.getINodesInPath(src, DirOp.READ); + // Just get INodesInPath without access checks, since we check for path + // access later + final INodesInPath iip = fsd.resolvePath(null, src, DirOp.READ); src = iip.getPath(); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (fsd.isPermissionEnabled()) {
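Taken together, the series leaves an external authorizer (for example, a Ranger-style plugin) two audit hooks: checkSuperUserPermissionWithContext(...) fires whenever the NameNode grants an operation on superuser grounds, and denyUserAccess(...) fires whenever the NameNode is about to refuse a request, so neither outcome bypasses the enforcer's audit trail. Below is a minimal sketch of an enforcer wired to both hooks; the class name and logger wiring are illustrative, and only the hook signatures and the AuthorizationContext accessors (getCallerUgi(), getOperationName()) come from the patches above.

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext;
import org.apache.hadoop.security.AccessControlException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Abstract so the legacy checkPermission overload can stay elided in this
// sketch; a real enforcer must implement it as well.
public abstract class AuditingEnforcer implements AccessControlEnforcer {

  private static final Logger AUDIT =
      LoggerFactory.getLogger(AuditingEnforcer.class);

  @Override
  public void checkSuperUserPermissionWithContext(AuthorizationContext ctx)
      throws AccessControlException {
    // Record the attempted superuser action, then apply the default check,
    // which throws AccessControlException for non-superusers.
    AUDIT.info("superuser check: user={} op={}",
        ctx.getCallerUgi(), ctx.getOperationName());
    AccessControlEnforcer.super.checkSuperUserPermissionWithContext(ctx);
  }

  @Override
  public void denyUserAccess(AuthorizationContext ctx, String errorMessage)
      throws AccessControlException {
    // Record the denial, then throw as the default implementation does.
    AUDIT.info("access denied: user={} op={}",
        ctx.getCallerUgi(), ctx.getOperationName());
    throw new AccessControlException(errorMessage);
  }
}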