From cb6f585cee7d8b07d8115a245f469b62b43a0081 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Fri, 9 Feb 2024 16:43:05 +0100
Subject: [PATCH 1/3] HDFS-17378. Add missing operationType for some operations in authorizer

---
 .../hdfs/server/namenode/FSNamesystem.java | 42 +++++++++++--------
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 44b79a3e0eef8..cd702ac545849 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2618,15 +2618,16 @@ void unsetStoragePolicy(String src) throws IOException {
    * @throws IOException
    */
   BlockStoragePolicy getStoragePolicy(String src) throws IOException {
+    final String operationName = "getStoragePolicy";
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       return FSDirAttrOp.getStoragePolicy(dir, pc, blockManager, src);
     } finally {
-      readUnlock("getStoragePolicy");
+      readUnlock(operationName);
     }
   }
 
@@ -2646,15 +2647,16 @@ BlockStoragePolicy[] getStoragePolicies() throws IOException {
   }
 
   long getPreferredBlockSize(String src) throws IOException {
+    final String operationName = "getPreferredBlockSize";
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       return FSDirAttrOp.getPreferredBlockSize(dir, pc, src);
     } finally {
-      readUnlock("getPreferredBlockSize");
+      readUnlock(operationName);
     }
   }
 
@@ -2727,6 +2729,7 @@ private HdfsFileStatus startFileInt(String src,
       long blockSize, CryptoProtocolVersion[] supportedVersions,
       String ecPolicyName, String storagePolicy, boolean logRetryCache)
       throws IOException {
+    final String operationName = "create";
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       StringBuilder builder = new StringBuilder();
       builder.append("DIR* NameSystem.startFile: src=").append(src)
@@ -2764,7 +2767,7 @@ private HdfsFileStatus startFileInt(String src,
 
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2827,7 +2830,7 @@ private HdfsFileStatus startFileInt(String src,
         dir.writeUnlock();
       }
     } finally {
-      writeUnlock("create", getLockReportInfoSupplier(src, null, stat));
+      writeUnlock(operationName, getLockReportInfoSupplier(src, null, stat));
       // There might be transactions logged while trying to recover the lease.
       // They need to be sync'ed even when an exception was thrown.
       if (!skipSync) {
@@ -2856,10 +2859,11 @@ private HdfsFileStatus startFileInt(String src,
    */
   boolean recoverLease(String src, String holder, String clientMachine)
       throws IOException {
+    final String operationName = "recoverLease";
     boolean skipSync = false;
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2880,7 +2884,7 @@ boolean recoverLease(String src, String holder, String clientMachine)
       skipSync = true;
       throw se;
     } finally {
-      writeUnlock("recoverLease");
+      writeUnlock(operationName);
       // There might be transactions logged while trying to recover the lease.
       // They need to be sync'ed even when an exception was thrown.
       if (!skipSync) {
@@ -3096,6 +3100,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
       final Set<Node> excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
+    final String operationName = "getAdditionalDatanode";
     //check if the feature is enabled
     dtpReplaceDatanodeOnFailure.checkEnabled();
 
@@ -3107,7 +3112,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
     final BlockType blockType;
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     readLock();
     try {
       // Changing this operation category to WRITE instead of making getAdditionalDatanode as a
@@ -3133,7 +3138,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
           "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
           src, fileId, blk, clientName, clientMachine));
     } finally {
-      readUnlock("getAdditionalDatanode");
+      readUnlock(operationName);
     }
 
     if (clientnode == null) {
@@ -3155,10 +3160,11 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
    */
   void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       throws IOException {
+    final String operationName = "abandonBlock";
     NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3167,7 +3173,7 @@ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       NameNode.stateChangeLog.debug(
           "BLOCK* NameSystem.abandonBlock: {} is removed from pendingCreates", b);
     } finally {
-      writeUnlock("abandonBlock");
+      writeUnlock(operationName);
     }
     getEditLog().logSync();
   }
@@ -3220,11 +3226,12 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId)
    */
   boolean completeFile(final String src, String holder,
                        ExtendedBlock last, long fileId)
-    throws IOException {
+      throws IOException {
+    final String operationName = "completeFile";
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3232,7 +3239,7 @@ boolean completeFile(final String src, String holder,
       success = FSDirWriteFileOp.completeFile(this, pc, src, holder,
                                               last, fileId);
     } finally {
-      writeUnlock("completeFile");
+      writeUnlock(operationName);
     }
     getEditLog().logSync();
     if (success) {
@@ -3666,10 +3673,11 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
    */
   void fsync(String src, long fileId, String clientName, long lastBlockLength)
       throws IOException {
+    final String operationName = "fsync";
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
-    FSPermissionChecker.setOperationType(null);
+    FSPermissionChecker.setOperationType(operationName);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3683,7 +3691,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
       }
       FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false);
     } finally {
-      writeUnlock("fsync");
+      writeUnlock(operationName);
     }
     getEditLog().logSync();
   }

From d68e060e85d2f356da6885c2e33508abe8cbf11c Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Mon, 26 Feb 2024 17:19:10 +0100
Subject: [PATCH 2/3] Trigger ci

From 09d7c02de82197594d8d85d89ced6daf3c8c51f8 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Wed, 1 Oct 2025 08:43:34 +0200
Subject: [PATCH 3/3] Remove now unneeded operationNames

---
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ab98c7addb19a..db08ea6bfdb0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2890,7 +2890,6 @@ private HdfsFileStatus startFileInt(String src,
    */
   boolean recoverLease(String src, String holder, String clientMachine)
       throws IOException {
-    final String operationName = "recoverLease";
     boolean skipSync = false;
     final String operationName = "recoverLease";
     checkOperation(OperationCategory.WRITE);
@@ -3132,7 +3131,6 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
       final Set<Node> excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
-    final String operationName = "getAdditionalDatanode";
     //check if the feature is enabled
     dtpReplaceDatanodeOnFailure.checkEnabled();
 
@@ -3260,7 +3258,6 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId)
   boolean completeFile(final String src, String holder,
                        ExtendedBlock last, long fileId)
       throws IOException {
-    final String operationName = "completeFile";
     boolean success = false;
     final String operationName = "completeFile";
     checkOperation(OperationCategory.WRITE);
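
Note (aside, not part of the patches above): every hunk in the series passes the concrete
operation name to FSPermissionChecker.setOperationType(...) instead of null, so that an
authorizer consulted during the permission check can see which RPC operation it is
authorizing, using the same name the method already passes to readUnlock()/writeUnlock().
The stand-alone sketch below only illustrates that pattern; PermissionChecker, its
println "authorizer", and the thread-local slot are simplified, hypothetical stand-ins
(the thread-local storage is an assumption about how the real checker keeps the value),
not Hadoop code.

import java.util.Optional;

/**
 * Hypothetical sketch: a permission checker publishes the current operation
 * name so an authorizer invoked from checkPermission() has that context.
 * Passing null (the pre-patch behaviour) leaves the authorizer without it.
 */
public class OperationTypeSketch {

  /** Simplified stand-in for the HDFS-side permission checker. */
  static class PermissionChecker {
    // One slot per handler thread; assumed to mirror the real checker's state.
    private static final ThreadLocal<String> OPERATION_TYPE = new ThreadLocal<>();

    static void setOperationType(String operationType) {
      OPERATION_TYPE.set(operationType);
    }

    /** Stand-in authorizer hook: here it only logs what it was given. */
    void checkPermission(String path) {
      String op = Optional.ofNullable(OPERATION_TYPE.get()).orElse("<unknown>");
      System.out.println("authorize " + op + " on " + path);
    }
  }

  /** Mirrors the shape of the patched methods: set the name, then lock and check. */
  static void getStoragePolicy(String src) {
    final String operationName = "getStoragePolicy";
    PermissionChecker pc = new PermissionChecker();
    PermissionChecker.setOperationType(operationName); // was setOperationType(null)
    // readLock(); try { ... } finally { readUnlock(operationName); } in the real method
    pc.checkPermission(src);
  }

  public static void main(String[] args) {
    // Prints "authorize getStoragePolicy on /data/f1" rather than "<unknown>".
    getStoragePolicy("/data/f1");
  }
}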