diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index cb6ac539a9a14..48968a4647f80 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -405,15 +405,7 @@ public void createBucket( .setKeyName(bucketArgs.getEncryptionKey()).build(); } - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - ugi.getUserName(), userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(ugi.getUserName()).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); + List listOfAcls = getAclList(); //ACLs from BucketArgs if(bucketArgs.getAcls() != null) { listOfAcls.addAll(bucketArgs.getAcls()); @@ -437,6 +429,16 @@ public void createBucket( ozoneManagerClient.createBucket(builder.build()); } + /** + * Helper function to get default acl list for current user. + * + * @return listOfAcls + * */ + private List getAclList() { + return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + userRights, groupRights); + } + @Override public void addBucketAcls( String volumeName, String bucketName, List addAcls) @@ -629,6 +631,7 @@ public OzoneOutputStream createKey( .setType(HddsProtos.ReplicationType.valueOf(type.toString())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) .addAllMetadata(metadata) + .setAcls(getAclList()) .build(); OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); @@ -819,6 +822,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, .setKeyName(keyName) .setType(HddsProtos.ReplicationType.valueOf(type.toString())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) + .setAcls(getAclList()) .build(); OmMultipartInfo multipartInfo = ozoneManagerClient .initiateMultipartUpload(keyArgs); @@ -848,6 +852,7 @@ public OzoneOutputStream createMultipartKey(String volumeName, .setIsMultipartKey(true) .setMultipartUploadID(uploadID) .setMultipartUploadPartNumber(partNumber) + .setAcls(getAclList()) .build(); OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); @@ -963,7 +968,10 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName).build(); + .setBucketName(bucketName) + .setKeyName(keyName) + .setAcls(getAclList()) + .build(); ozoneManagerClient.createDirectory(keyArgs); } @@ -990,6 +998,7 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, .setDataSize(size) .setType(HddsProtos.ReplicationType.valueOf(type.name())) .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) + .setAcls(getAclList()) .build(); OpenKeySession keySession = ozoneManagerClient.createFile(keyArgs, overWrite, recursive); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 9a50ee03c1351..8ee33b4e9990b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -46,9 +46,10 @@ public class OzoneAcl { private ACLIdentityType type; private String name; private BitSet aclBitSet; + public static final BitSet ZERO_BITSET = new BitSet(0); /** - * Constructor for OzoneAcl. + * Default constructor. */ public OzoneAcl() { } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index d90345cc00742..de07d08fb2d19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.Auditable; @@ -45,13 +46,15 @@ public final class OmKeyArgs implements Auditable { private final int multipartUploadPartNumber; private Map metadata; private boolean refreshPipeline; + private List acls; @SuppressWarnings("parameternumber") private OmKeyArgs(String volumeName, String bucketName, String keyName, long dataSize, ReplicationType type, ReplicationFactor factor, List locationInfoList, boolean isMultipart, String uploadID, int partNumber, - Map metadataMap, boolean refreshPipeline) { + Map metadataMap, boolean refreshPipeline, + List acls) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -64,6 +67,7 @@ private OmKeyArgs(String volumeName, String bucketName, String keyName, this.multipartUploadPartNumber = partNumber; this.metadata = metadataMap; this.refreshPipeline = refreshPipeline; + this.acls = acls; } public boolean getIsMultipartKey() { @@ -86,6 +90,10 @@ public ReplicationFactor getFactor() { return factor; } + public List getAcls() { + return acls; + } + public String getVolumeName() { return volumeName; } @@ -166,6 +174,7 @@ public static class Builder { private int multipartUploadPartNumber; private Map metadata = new HashMap<>(); private boolean refreshPipeline; + private List acls; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -202,6 +211,11 @@ public Builder setLocationInfoList(List locationInfos) { return this; } + public Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + public Builder setIsMultipartKey(boolean isMultipart) { this.isMultipartKey = isMultipart; return this; @@ -235,7 +249,7 @@ public Builder setRefreshPipeline(boolean refresh) { public OmKeyArgs build() { return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, factor, locationInfoList, isMultipartKey, multipartUploadID, - multipartUploadPartNumber, metadata, refreshPipeline); + multipartUploadPartNumber, metadata, refreshPipeline, acls); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 07f7909f5dd8f..4e3862de721cc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.util.Time; @@ -50,6 +51,10 @@ public final class OmKeyInfo extends WithMetadata { private HddsProtos.ReplicationType type; private HddsProtos.ReplicationFactor factor; private FileEncryptionInfo encInfo; + /** + * ACL Information. + */ + private List acls; @SuppressWarnings("parameternumber") OmKeyInfo(String volumeName, String bucketName, String keyName, @@ -58,7 +63,7 @@ public final class OmKeyInfo extends WithMetadata { HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, Map metadata, - FileEncryptionInfo encInfo) { + FileEncryptionInfo encInfo, List acls) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -81,6 +86,7 @@ public final class OmKeyInfo extends WithMetadata { this.type = type; this.metadata = metadata; this.encInfo = encInfo; + this.acls = acls; } public String getVolumeName() { @@ -216,6 +222,10 @@ public FileEncryptionInfo getFileEncryptionInfo() { return encInfo; } + public List getAcls() { + return acls; + } + /** * Builder of OmKeyInfo. */ @@ -232,6 +242,7 @@ public static class Builder { private HddsProtos.ReplicationFactor factor; private Map metadata; private FileEncryptionInfo encInfo; + private List acls; public Builder() { this.metadata = new HashMap<>(); @@ -299,11 +310,16 @@ public Builder setFileEncryptionInfo(FileEncryptionInfo feInfo) { return this; } + public Builder setAcls(List listOfAcls) { + this.acls = listOfAcls; + return this; + } + public OmKeyInfo build() { return new OmKeyInfo( volumeName, bucketName, keyName, omKeyLocationInfoGroups, dataSize, creationTime, modificationTime, type, factor, metadata, - encInfo); + encInfo, acls); } } @@ -327,6 +343,9 @@ public KeyInfo getProtobuf() { if (encInfo != null) { kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo)); } + if(acls != null) { + kb.addAllAcls(acls); + } return kb.build(); } @@ -345,7 +364,8 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { keyInfo.getFactor(), KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList()), keyInfo.hasFileEncryptionInfo() ? OMPBHelper.convert(keyInfo - .getFileEncryptionInfo()): null); + .getFileEncryptionInfo()): null, + keyInfo.getAclsList()); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index cc181f7549afb..0484f4b00e862 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.Objects; +import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights.ALL; @@ -81,8 +82,17 @@ public void addAcl(OzoneAcl acl) throws OMException { if (!getMap(aclType).containsKey(acl.getName())) { getMap(aclType).put(acl.getName(), acl.getAclBitSet()); } else { - // throw exception if acl is already added. 
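The OmOzoneAclMap.addAcl/removeAcl change beginning here (mirrored later in BucketManagerImpl) stops treating an ACL entry as all-or-nothing: adding rights for a name that already has an entry ORs the new bits into the stored BitSet, and removing clears only the overlapping bits, dropping the entry once nothing is left. A self-contained sketch of that BitSet arithmetic, with illustrative helper names that are not part of the patch:

```java
import java.util.BitSet;

/**
 * Standalone sketch (not from the patch) of the BitSet arithmetic that
 * OmOzoneAclMap and BucketManagerImpl use to merge and strip ACL rights.
 */
public final class AclBitSetSketch {

  private static final BitSet ZERO_BITSET = new BitSet(0);

  /** Merges {@code requested} into {@code stored}; false if nothing new was granted. */
  static boolean addRights(BitSet stored, BitSet requested) {
    BitSet union = (BitSet) requested.clone();
    union.or(stored);
    if (union.equals(stored)) {
      return false;              // every requested bit was already set
    }
    stored.or(requested);        // fold the new rights into the stored entry
    return true;
  }

  /** Clears the overlapping rights; true if the entry is now empty and can be dropped. */
  static boolean removeRights(BitSet stored, BitSet requested) {
    BitSet overlap = (BitSet) requested.clone();
    overlap.and(stored);
    if (overlap.equals(ZERO_BITSET)) {
      return false;              // none of the requested rights were present
    }
    stored.xor(overlap);         // strip only the bits that were actually set
    return stored.equals(ZERO_BITSET);
  }

  public static void main(String[] args) {
    BitSet stored = new BitSet();
    stored.set(0);                                   // say bit 0 == READ
    BitSet write = new BitSet();
    write.set(1);                                    // bit 1 == WRITE

    System.out.println(addRights(stored, write));    // true  -> entry is now READ|WRITE
    System.out.println(addRights(stored, write));    // false -> WRITE already present
    System.out.println(removeRights(stored, write)); // false -> READ still remains
  }
}
```

The map-backed version throws INVALID_REQUEST where this sketch returns false; BucketManagerImpl returns false instead.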
- throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST); + // Check if we are adding new rights to existing acl. + BitSet temp = (BitSet) acl.getAclBitSet().clone(); + BitSet curRights = (BitSet) getMap(aclType).get(acl.getName()).clone(); + temp.or(curRights); + + if (temp.equals(curRights)) { + // throw exception if acl is already added. + throw new OMException("Acl " + acl + " already exist.", + INVALID_REQUEST); + } + getMap(aclType).get(acl.getName()).or(acl.getAclBitSet()); } } @@ -105,9 +115,25 @@ public void removeAcl(OzoneAcl acl) throws OMException { Objects.requireNonNull(acl, "Acl should not be null."); OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); if (getMap(aclType).containsKey(acl.getName())) { - getMap(aclType).remove(acl.getName()); + BitSet aclRights = getMap(aclType).get(acl.getName()); + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(aclRights); + + if (bits.equals(ZERO_BITSET)) { + // throw exception if acl doesn't exist. + throw new OMException("Acl [" + acl + "] doesn't exist.", + INVALID_REQUEST); + } + + acl.getAclBitSet().and(aclRights); + aclRights.xor(acl.getAclBitSet()); + + // Remove the acl as all rights are already set to 0. + if (aclRights.equals(ZERO_BITSET)) { + getMap(aclType).remove(acl.getName()); + } } else { - // throw exception if acl is already added. + // throw exception if acl doesn't exist. throw new OMException("Acl [" + acl + "] doesn't exist.", INVALID_REQUEST); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 7d1e4151a7110..c93ed3cabac83 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -668,6 +668,11 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()); + if(args.getAcls() != null) { + keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + if (args.getFactor() != null) { keyArgs.setFactor(args.getFactor()); } @@ -991,6 +996,8 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) .setFactor(omKeyArgs.getFactor()) + .addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setType(omKeyArgs.getType()); multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); @@ -1276,6 +1283,8 @@ public void createDirectory(OmKeyArgs args) throws IOException { .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() .setKeyArgs(keyArgs) @@ -1412,6 +1421,8 @@ public OpenKeySession createFile(OmKeyArgs args, .setDataSize(args.getDataSize()) .setType(args.getType()) .setFactor(args.getFactor()) + .addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); CreateFileRequest createFileRequest = 
CreateFileRequest.newBuilder() .setKeyArgs(keyArgs) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 16df10fb274aa..cbb9fb8e21a6d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.security.acl; -import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import java.util.StringTokenizer; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** * Class representing an ozone object. @@ -45,16 +45,14 @@ public String getPath() { case VOLUME: return getVolumeName(); case BUCKET: - return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER - + getBucketName(); + return getVolumeName() + OZONE_URI_DELIMITER + getBucketName(); case KEY: - return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER - + getBucketName() + OzoneConsts.OZONE_URI_DELIMITER + getKeyName(); + return getVolumeName() + OZONE_URI_DELIMITER + getBucketName() + + OZONE_URI_DELIMITER + getKeyName(); default: throw new IllegalArgumentException("Unknown resource " + "type" + getResourceType()); } - } @Override @@ -77,25 +75,36 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj Builder builder = new Builder() .setResType(ResourceType.valueOf(proto.getResType().name())) .setStoreType(StoreType.valueOf(proto.getStoreType().name())); - StringTokenizer tokenizer = new StringTokenizer(proto.getPath(), - OzoneConsts.OZONE_URI_DELIMITER); - // Set volume name. - if (tokenizer.hasMoreTokens()) { - builder.setVolumeName(tokenizer.nextToken()); - } - // Set bucket name. - if (tokenizer.hasMoreTokens()) { - builder.setBucketName(tokenizer.nextToken()); + String[] tokens = StringUtils.splitPreserveAllTokens(proto.getPath(), + OZONE_URI_DELIMITER); + if(tokens == null) { + throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); } - // Set key name - if (tokenizer.hasMoreTokens()) { - StringBuffer sb = new StringBuffer(); - while (tokenizer.hasMoreTokens()) { - sb.append(OzoneConsts.OZONE_URI_DELIMITER); - sb.append(tokenizer.nextToken()); - sb.append(OzoneConsts.OZONE_URI_DELIMITER); + // Set volume name. + switch (proto.getResType()) { + case VOLUME: + builder.setVolumeName(tokens[0]); + break; + case BUCKET: + if (tokens.length < 2) { + throw new IllegalArgumentException("Unexpected argument for " + + "Ozone key. Path:" + proto.getPath()); + } + builder.setVolumeName(tokens[0]); + builder.setBucketName(tokens[1]); + break; + case KEY: + if (tokens.length != 3) { + throw new IllegalArgumentException("Unexpected argument for " + + "Ozone key. Path:" + proto.getPath()); } - builder.setKeyName(sb.toString()); + builder.setVolumeName(tokens[0]); + builder.setBucketName(tokens[1]); + builder.setKeyName(tokens[2]); + break; + default: + throw new IllegalArgumentException("Unexpected type for " + + "Ozone key. 
Type:" + proto.getResType()); } return builder.build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index 24840910300e3..70bc3ed3e2394 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -23,6 +23,8 @@ import java.nio.charset.Charset; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.TimeZone; import java.util.UUID; @@ -31,11 +33,16 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.ratis.util.TimeDuration; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; + /** * Set of Utility functions used in ozone. */ @@ -236,4 +243,26 @@ public static long getTimeDurationInMS(Configuration conf, String key, return getTimeDuration(conf, key, defaultValue) .toLong(TimeUnit.MILLISECONDS); } + + /** + * Helper function to get deafult acl list for current user. + * + * @param userName + * @param userGroups + * @return listOfAcls + * */ + public static List getAclList(String userName, + List userGroups, ACLType userRights, ACLType groupRights) { + + List listOfAcls = new ArrayList<>(); + + // User ACL. + listOfAcls.add(new OzoneAcl(USER, userName, userRights)); + if(userGroups != null) { + // Group ACLs of the User. 
+ userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(GROUP, group, groupRights))); + } + return listOfAcls; + } } diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 8e8d401a7f708..ef6b14c1dc971 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -600,6 +600,7 @@ message KeyArgs { optional string multipartUploadID = 9; optional uint32 multipartNumber = 10; repeated hadoop.hdds.KeyValue metadata = 11; + repeated OzoneAclInfo acls = 12; } message KeyLocation { @@ -637,6 +638,7 @@ message KeyInfo { optional uint64 latestVersion = 10; repeated hadoop.hdds.KeyValue metadata = 11; optional FileEncryptionInfoProto fileEncryptionInfo = 12; + repeated OzoneAclInfo acls = 13; } message OzoneFileStatusProto { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index e6224ab044ff8..b21c85d48ace0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -84,8 +84,10 @@ import org.apache.hadoop.ozone.s3.util.OzoneS3Util; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; @@ -94,6 +96,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; + +import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; import org.junit.Assert; @@ -607,8 +612,8 @@ public void testPutKey() String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -617,8 +622,8 @@ public void testPutKey() byte[] fileContent = new byte[value.getBytes().length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE)); + keyName, STAND_ALONE, + ONE)); Assert.assertEquals(value, new String(fileContent)); Assert.assertTrue(key.getCreationTime() >= currentTime); Assert.assertTrue(key.getModificationTime() >= currentTime); @@ -639,7 +644,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { // create the initial key with size 0, write will allocate the first block. 
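The OzoneUtils.getAclList helper added above is what RpcClient (and, further down, DistributedStorageHandler) now calls so that every new key carries a default ACL set. A minimal sketch of a caller building that list and attaching it to OmKeyArgs; the class and method names here are illustrative, and RpcClient actually takes the default rights from configuration rather than hard-coding ALL:

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;

/** Illustrative caller of the new OzoneUtils.getAclList helper. */
final class DefaultKeyAclsSketch {

  static OmKeyArgs buildKeyArgs(String volume, String bucket, String key)
      throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // One USER entry for the caller plus one GROUP entry per group membership.
    List<OzoneAcl> defaultAcls = OzoneUtils.getAclList(
        ugi.getUserName(), ugi.getGroups(), ACLType.ALL, ACLType.ALL);
    return new OmKeyArgs.Builder()
        .setVolumeName(volume)
        .setBucketName(bucket)
        .setKeyName(key)
        .setAcls(defaultAcls)   // KeyManagerImpl.openKey now requires a non-null acl list
        .build();
  }
}
```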
OzoneOutputStream out = bucket.createKey(keyName, 0, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); + STAND_ALONE, ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); @@ -677,7 +682,7 @@ public void testPutKeyRatisOneNode() OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -687,7 +692,7 @@ public void testPutKeyRatisOneNode() is.read(fileContent); is.close(); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.RATIS, ReplicationFactor.ONE)); + keyName, ReplicationType.RATIS, ONE)); Assert.assertEquals(value, new String(fileContent)); Assert.assertTrue(key.getCreationTime() >= currentTime); Assert.assertTrue(key.getModificationTime() >= currentTime); @@ -832,7 +837,7 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); @@ -904,8 +909,8 @@ public void testGetKeyDetails() throws IOException, OzoneException { //String keyValue = "this is a test value.glx"; // create the initial key with size 0, write will allocate the first block. OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + keyValue.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(keyValue.getBytes()); out.close(); @@ -993,7 +998,7 @@ public void testReadKeyWithCorruptedData() throws IOException { // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); @@ -1161,8 +1166,8 @@ public void testDeleteKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(keyName); @@ -1185,8 +1190,8 @@ public void testRenameKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(fromKeyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); + value.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); out.write(value.getBytes()); out.close(); OzoneKey key = bucket.getKey(fromKeyName); @@ -1380,25 +1385,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); OzoneOutputStream one = volAbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new 
HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); four.write(value); four.close(); @@ -1413,25 +1418,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); OzoneOutputStream one = volAbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + value.length, STAND_ALONE, ONE, new HashMap<>()); four.write(value); four.close(); @@ -1512,7 +1517,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1524,7 +1529,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws // Call initiate multipart upload for the same key again, this should // generate a new uploadID. 
multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); @@ -1580,7 +1585,7 @@ public void testUploadPartWithNoOverride() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1618,7 +1623,7 @@ public void testUploadPartOverrideWithStandAlone() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.STAND_ALONE, ReplicationFactor.ONE); + STAND_ALONE, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1746,7 +1751,6 @@ public void testMultipartUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); doMultipartUpload(bucket, keyName, (byte)98); - } @@ -1782,18 +1786,18 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); // Upload Parts Map partsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes( - UTF_8)); + String partName = uploadPart(bucket, keyName, uploadID, 1, + "data".getBytes(UTF_8)); partsMap.put(1, partName); - partName = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes( - UTF_8)); + partName = uploadPart(bucket, keyName, uploadID, 2, + "data".getBytes(UTF_8)); partsMap.put(2, partName); @@ -1815,8 +1819,8 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); // We have not uploaded any parts, but passing some list it should throw // error. 
@@ -1840,8 +1844,8 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw @@ -1865,8 +1869,8 @@ public void testMultipartUploadWithMissingParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw @@ -1905,8 +1909,8 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -1921,8 +1925,8 @@ public void testAbortUploadSuccessWithParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); bucket.abortMultipartUpload(keyName, uploadID); } @@ -1939,8 +1943,8 @@ public void testListMultipartUploadParts() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); @@ -1956,7 +1960,7 @@ public void testListMultipartUploadParts() throws Exception { OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(3, ozoneMultipartUploadPartListParts.getPartInfoList().size()); @@ -1990,8 +1994,8 @@ public void testListMultipartUploadPartsWithContinuation() OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); @@ -2007,7 +2011,7 @@ public void testListMultipartUploadPartsWithContinuation() OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = 
bucket.listParts(keyName, uploadID, 0, 2); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(2, @@ -2095,8 +2099,8 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .STAND_ALONE, ReplicationFactor.ONE); + String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + ONE); uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -2108,7 +2112,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() Assert.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(ReplicationType.STAND_ALONE, + Assert.assertEquals(STAND_ALONE, ozoneMultipartUploadPartListParts.getReplicationType()); // As we don't have any parts with greater than partNumberMarker and list @@ -2138,54 +2142,43 @@ public void testListPartsWithInvalidUploadID() throws Exception { public void testNativeAclsForVolume() throws Exception { String volumeName = UUID.randomUUID().toString(); store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); + OzoneObj ozObj = new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setResType(OzoneObj.ResourceType.VOLUME) .setStoreType(OzoneObj.StoreType.OZONE) .build(); - // Get acls for volume. - List volAcls = store.getAcl(ozObj); - volAcls.forEach(a -> assertTrue(volume.getAcls().contains(a))); - // Remove all acl's. - for (OzoneAcl a : volAcls) { - store.removeAcl(ozObj, a); - } - List newAcls = store.getAcl(ozObj); - OzoneVolume finalVolume = store.getVolume(volumeName); - assertTrue(finalVolume.getAcls().size() == 0); - assertTrue(newAcls.size() == 0); - - // Add acl's and then call getAcl. - for (OzoneAcl a : volAcls) { - // Try removing an acl which doesn't exist, it should return false. - assertFalse(finalVolume.getAcls().contains(a)); - assertFalse(store.removeAcl(ozObj, a)); + validateOzoneAcl(ozObj); + } - assertTrue(store.addAcl(ozObj, a)); - finalVolume = store.getVolume(volumeName); - assertTrue(finalVolume.getAcls().contains(a)); + @Test + public void testNativeAclsForBucket() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); - // Call addAcl again, this time operation will fail as - // acl is already added. - assertFalse(store.addAcl(ozObj, a)); - } - assertTrue(finalVolume.getAcls().size() == volAcls.size()); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + assertNotNull("Bucket creation failed", bucket); + OzoneObj ozObj = new OzoneObjInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); - // Reset acl's. 
- store.setAcl(ozObj, newAcls); - finalVolume = store.getVolume(volumeName); - newAcls = store.getAcl(ozObj); - assertTrue(newAcls.size() == 0); - assertTrue(finalVolume.getAcls().size() == 0); + validateOzoneAcl(ozObj); } @Test - public void testNativeAclsForBucket() throws Exception { + public void testNativeAclsForKey() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -2193,48 +2186,121 @@ public void testNativeAclsForBucket() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); assertNotNull("Bucket creation failed", bucket); + writeKey(key1, bucket); + writeKey(key2, bucket); + OzoneObj ozObj = new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) - .setResType(OzoneObj.ResourceType.BUCKET) + .setKeyName(key1) + .setResType(OzoneObj.ResourceType.KEY) .setStoreType(OzoneObj.StoreType.OZONE) .build(); + + validateOzoneAcl(ozObj); + } + + /** + * Helper function to get default acl list for current user. + * + * @return list of default Acls. + * @throws IOException + * */ + private List getAclList(OzoneConfiguration conf) + throws IOException { + List listOfAcls = new ArrayList<>(); + //User ACL + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType groupRights = aclConfig.getGroupDefaultRights(); + + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, + ugi.getUserName(), userRights)); + //Group ACLs of the User + List userGroups = Arrays.asList(UserGroupInformation + .createRemoteUser(ugi.getUserName()).getGroupNames()); + userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(ACLIdentityType.GROUP, group, groupRights))); + return listOfAcls; + } + + /** + * Helper function to validate ozone Acl for given object. + * @param ozObj + * */ + private void validateOzoneAcl(OzoneObj ozObj) throws IOException { // Get acls for volume. - List volAcls = store.getAcl(ozObj); - volAcls.forEach(a -> assertTrue(bucket.getAcls().contains(a))); + List expectedAcls = getAclList(new OzoneConfiguration()); + + // Case:1 Add new acl permission to existing acl. + if(expectedAcls.size()>0) { + OzoneAcl oldAcl = expectedAcls.get(0); + OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), + ACLType.READ_ACL); + // Verify that operation successful. + assertTrue(store.addAcl(ozObj, newAcl)); + List acls = store.getAcl(ozObj); + + assertTrue(acls.size() == expectedAcls.size()); + boolean aclVerified = false; + for(OzoneAcl acl: acls) { + if(acl.getName().equals(newAcl.getName())) { + assertTrue(acl.getAclList().contains(ACLType.READ_ACL)); + aclVerified = true; + } + } + assertTrue("New acl expected but not found.", aclVerified); + aclVerified = false; + + // Case:2 Remove newly added acl permission. 
+ assertTrue(store.removeAcl(ozObj, newAcl)); + acls = store.getAcl(ozObj); + assertTrue(acls.size() == expectedAcls.size()); + for(OzoneAcl acl: acls) { + if(acl.getName().equals(newAcl.getName())) { + assertFalse(acl.getAclList().contains(ACLType.READ_ACL)); + aclVerified = true; + } + } + assertTrue("New acl expected but not found.", aclVerified); + } else { + fail("Default acl should not be empty."); + } + + List keyAcls = store.getAcl(ozObj); + expectedAcls.forEach(a -> assertTrue(keyAcls.contains(a))); // Remove all acl's. - for (OzoneAcl a : volAcls) { - assertTrue(store.removeAcl(ozObj, a)); + for (OzoneAcl a : expectedAcls) { + store.removeAcl(ozObj, a); } List newAcls = store.getAcl(ozObj); - OzoneBucket finalBuck = volume.getBucket(bucketName); - assertTrue(finalBuck.getAcls().size() == 0); assertTrue(newAcls.size() == 0); // Add acl's and then call getAcl. - for (OzoneAcl a : volAcls) { - // Try removing an acl which doesn't exist, it should return false. - assertFalse(finalBuck.getAcls().contains(a)); - assertFalse(store.removeAcl(ozObj, a)); - - // Add acl should succeed. + int aclCount = 0; + for (OzoneAcl a : expectedAcls) { + aclCount++; assertTrue(store.addAcl(ozObj, a)); - finalBuck = volume.getBucket(bucketName); - assertTrue(finalBuck.getAcls().contains(a)); - - // Call addAcl again, this time operation will return false as - // acl is already added. - assertFalse(store.addAcl(ozObj, a)); + assertTrue(store.getAcl(ozObj).size() == aclCount); } - assertTrue(finalBuck.getAcls().size() == volAcls.size()); + newAcls = store.getAcl(ozObj); + assertTrue(newAcls.size() == expectedAcls.size()); + List finalNewAcls = newAcls; + expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a))); // Reset acl's. - store.setAcl(ozObj, newAcls); - finalBuck = volume.getBucket(bucketName); + store.setAcl(ozObj, new ArrayList<>()); newAcls = store.getAcl(ozObj); assertTrue(newAcls.size() == 0); - assertTrue(finalBuck.getAcls().size() == 0); + } + + private void writeKey(String key1, OzoneBucket bucket) throws IOException { + OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE, + ONE, new HashMap<>()); + out.write(RandomStringUtils.random(1024).getBytes()); + out.close(); } private byte[] generateData(int size, byte val) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java index 529d476e615ff..08e4130486659 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java @@ -112,6 +112,7 @@ public void testWriteRead() throws Exception { String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(3 * (int)OzoneConsts.MB); + keyArgs.setUserName(userName); try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { outputStream.write(dataString.getBytes()); @@ -190,6 +191,7 @@ public void testPartialRead() throws Exception { String dataString = RandomStringUtils.randomAscii(500); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(500); + keyArgs.setUserName(userName); try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) { outputStream.write(dataString.getBytes()); 
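Condensing the new TestOzoneRpcClientAbstract flow above: a key is now created with the caller's default ACLs already attached, and ObjectStore.addAcl merges additional rights into the existing entry instead of rejecting it as a duplicate. A rough end-to-end sketch; the helper class and the key name are illustrative:

```java
import java.util.HashMap;

import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
import org.apache.hadoop.security.UserGroupInformation;

/** Rough end-to-end sketch of key ACLs after this patch; not a test from the patch. */
final class KeyAclSketch {

  static void grantReadAclOnKey(ObjectStore store, OzoneBucket bucket)
      throws Exception {
    // The key is created with the caller's default USER/GROUP ACLs attached.
    OzoneOutputStream out = bucket.createKey("key1", 4,
        ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>());
    out.write("data".getBytes());
    out.close();

    OzoneObj keyObj = new OzoneObjInfo.Builder()
        .setVolumeName(bucket.getVolumeName())
        .setBucketName(bucket.getName())
        .setKeyName("key1")
        .setResType(OzoneObj.ResourceType.KEY)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();

    // READ_ACL is merged into the caller's existing USER entry, so the number of
    // entries returned by store.getAcl(keyObj) does not change.
    String user = UserGroupInformation.getCurrentUser().getUserName();
    store.addAcl(keyObj, new OzoneAcl(ACLIdentityType.USER, user, ACLType.READ_ACL));
  }
}
```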
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index a1b20bd5881f9..25314db554e87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -44,6 +44,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -116,6 +117,7 @@ public void testAllocateCommit() throws Exception { .setKeyName(keyName) .setDataSize(1000) .setRefreshPipeline(true) + .setAcls(new ArrayList<>()) .build(); // 1st update, version 0 @@ -220,6 +222,7 @@ public void testReadLatestVersion() throws Exception { String dataString = RandomStringUtils.randomAlphabetic(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); + keyArgs.setUserName(userName); // this write will create 1st version with one block try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 6d4702fa9340b..1057e7a23ba28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -486,6 +486,7 @@ public void testDeleteNonEmptyBucket() throws Exception { String dataString = RandomStringUtils.randomAscii(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); } @@ -525,6 +526,7 @@ public void testGetKeyWriterReader() throws IOException, OzoneException { String dataString = RandomStringUtils.randomAscii(100); KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); } @@ -567,6 +569,7 @@ public void testKeyOverwrite() throws IOException, OzoneException { KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs); keyArgs.setSize(100); + keyArgs.setUserName(userName); String dataString = RandomStringUtils.randomAscii(100); try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); @@ -577,6 +580,7 @@ public void testKeyOverwrite() throws IOException, OzoneException { // That is this overwrite only overwrites the keys on OM. We need to // garbage collect those blocks from datanode. 
KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs); + keyArgs2.setUserName(userName); storageHandler.newKeyWriter(keyArgs2); Assert .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 4b6118715e7eb..400286855cea5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.commons.lang3.RandomStringUtils; @@ -134,6 +135,7 @@ public void testSafeModeOperations() throws Exception { .setBucketName(bucketName) .setKeyName(keyName) .setDataSize(1000) + .setAcls(Collections.emptyList()) .build(); OmVolumeArgs volArgs = new OmVolumeArgs.Builder() .setAdminName(adminName) diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index 42c7238427db3..a4aa361e39862 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -66,7 +66,9 @@ import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.concurrent.TimeUnit; /** @@ -444,6 +446,8 @@ public BucketInfo getBucketInfo(BucketArgs args) @Override public OutputStream newKeyWriter(KeyArgs args) throws IOException, OzoneException { + Objects.requireNonNull(args.getUserName(), + "Username should not be null"); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) @@ -451,6 +455,9 @@ public OutputStream newKeyWriter(KeyArgs args) throws IOException, .setDataSize(args.getSize()) .setType(xceiverClientManager.getType()) .setFactor(xceiverClientManager.getFactor()) + .setAcls(OzoneUtils.getAclList(args.getUserName(), + args.getGroups() != null ? Arrays.asList(args.getGroups()) : null, + ACLType.ALL, ACLType.ALL)) .build(); // contact OM to allocate a block for key. 
OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 18cc266a11617..ea8f5f052171f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; import java.util.List; import java.util.Objects; @@ -40,6 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; /** @@ -404,27 +407,44 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - List list = bucketInfo.getAcls(); - if(!validateAddAcl(acl, list)) { - // New acl can't be added as it is not consistent with existing ACLs. - LOG.info("New acl:{} can't be added as it is not consistent with " + - "existing ACLs:{}.", acl, StringUtils.join(",", list)); - return false; + + // Case 1: When we are adding more rights to existing user/group. + boolean addToExistingAcl = false; + for(OzoneAcl a: bucketInfo.getAcls()) { + if(a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType())) { + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.or(a.getAclBitSet()); + + if (bits.equals(a.getAclBitSet())) { + return false; + } + a.getAclBitSet().or(acl.getAclBitSet()); + addToExistingAcl = true; + break; + } } - list.add(acl); - OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(list) - .build(); - // TODO:HDDS-1619 OM HA changes required for all acl operations. - metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + // Case 2: When a completely new acl is added. + if(!addToExistingAcl) { + List newAcls = bucketInfo.getAcls(); + if(newAcls == null) { + newAcls = new ArrayList<>(); + } + newAcls.add(acl); + bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setStorageType(bucketInfo.getStorageType()) + .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) + .setCreationTime(bucketInfo.getCreationTime()) + .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) + .addAllMetadata(bucketInfo.getMetadata()) + .setAcls(newAcls) + .build(); + } + + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", @@ -466,26 +486,31 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { throw new OMException("Bucket " + bucket + " is not found", BUCKET_NOT_FOUND); } - List list = bucketInfo.getAcls(); - if (!list.contains(acl)) { - // Return false if acl doesn't exist in current ACLs. 
- LOG.info("Acl:{} not found in existing ACLs:{}.", acl, - StringUtils.join(",", list)); - return false; + + // When we are removing subset of rights from existing acl. + for(OzoneAcl a: bucketInfo.getAcls()) { + if(a.getName().equals(acl.getName()) && + a.getType().equals(acl.getType())) { + BitSet bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(a.getAclBitSet()); + + if (bits.equals(ZERO_BITSET)) { + return false; + } + bits = (BitSet) acl.getAclBitSet().clone(); + bits.and(a.getAclBitSet()); + a.getAclBitSet().xor(bits); + + if(a.getAclBitSet().equals(ZERO_BITSET)) { + bucketInfo.getAcls().remove(a); + } + break; + } else { + return false; + } } - list.remove(acl); - OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(bucketInfo.getCreationTime()) - .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) - .addAllMetadata(bucketInfo.getMetadata()) - .setAcls(list) - .build(); - metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); + metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); } catch (IOException ex) { if (!(ex instanceof OMException)) { LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", @@ -552,23 +577,6 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { return true; } - /** - * Validates if a new acl addition is consistent with current ACL list. - * @param newAcl new acl to be added. - * @param currentAcls list of acls. - * - * @return true if newAcl addition to existing acls is valid, else false. - * */ - private boolean validateAddAcl(OzoneAcl newAcl, List currentAcls) { - - // Check 1: Check for duplicate. - if(currentAcls.contains(newAcl)) { - return false; - } - - return true; - } - /** * Returns list of ACLs for given Ozone object. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 0006e93fa9ff7..51c0cfae631b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -35,6 +36,7 @@ .KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyLocation; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.utils.BackgroundService; import java.io.IOException; @@ -285,4 +287,42 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException; + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * + * @throws IOException if there is error. + * */ + boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. 
+ * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * + * @throws IOException if there is error. + * */ + boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; + + /** + * Acls to be set for given Ozone object. This operations reset ACL for + * given object to list of ACLs provided in argument. + * @param obj Ozone object. + * @param acls List of acls. + * + * @throws IOException if there is error. + * */ + boolean setAcl(OzoneObj obj, List acls) throws IOException; + + /** + * Returns list of ACLs for given Ozone object. + * @param obj Ozone object. + * + * @throws IOException if there is error. + * */ + List getAcl(OzoneObj obj) throws IOException; + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 895a47ae37582..0deda0dfafc77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -26,11 +26,13 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; +import java.util.stream.Collectors; import com.google.common.base.Strings; import org.apache.commons.codec.digest.DigestUtils; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; @@ -74,7 +77,11 @@ .KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.ozone.common.BlockGroup; @@ -107,6 +114,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.util.Time.monotonicNow; import org.slf4j.Logger; @@ -217,14 +228,14 @@ private void validateBucket(String volumeName, String bucketName) if (metadataManager.getVolumeTable().get(volumeKey) == null) { LOG.error("volume not found: {}", volumeName); throw new OMException("Volume not 
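Taken together, the four methods added to KeyManager form a small read/modify/write surface for key ACLs. The sketch below restates that contract with generic stand-in types so it compiles on its own; AclStore and the replaceWith helper are illustrative names, not part of Ozone.

import java.io.IOException;
import java.util.List;

// Stand-in types; in Ozone these are OzoneObj and OzoneAcl.
interface AclStore<OBJ, ACL> {
  boolean addAcl(OBJ obj, ACL acl) throws IOException;        // merge one acl into the entry
  boolean removeAcl(OBJ obj, ACL acl) throws IOException;     // drop one acl, or a subset of its rights
  boolean setAcl(OBJ obj, List<ACL> acls) throws IOException; // replace the whole list
  List<ACL> getAcl(OBJ obj) throws IOException;               // read the current list
}

// Typical round trip a caller might perform: read, compare, write back.
final class AclRoundTrip {
  static <OBJ, ACL> void replaceWith(AclStore<OBJ, ACL> store, OBJ obj, List<ACL> wanted)
      throws IOException {
    List<ACL> current = store.getAcl(obj);
    if (!current.equals(wanted)) {
      store.setAcl(obj, wanted);   // setAcl resets the entry to exactly 'wanted'
    }
  }
}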
found", - OMException.ResultCodes.VOLUME_NOT_FOUND); + VOLUME_NOT_FOUND); } // if the volume exists but bucket does not exist, throw bucket not found // exception LOG.error("bucket not found: {}/{} ", volumeName, bucketName); throw new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } } @@ -242,7 +253,7 @@ private void validateS3Bucket(String volumeName, String bucketName) if (metadataManager.getBucketTable().get(bucketKey) == null) { LOG.error("bucket not found: {}/{} ", volumeName, bucketName); throw new OMException("Bucket not found", - ResultCodes.BUCKET_NOT_FOUND); + BUCKET_NOT_FOUND); } } @@ -265,7 +276,7 @@ public OmKeyLocationInfo addAllocatedBlock(OmKeyArgs args, long clientID, LOG.error("Allocate block for a key not in open status in meta store" + " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); throw new OMException("Open Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } OmKeyLocationInfo omKeyLocationInfo = @@ -294,7 +305,7 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, LOG.error("Allocate block for a key not in open status in meta store" + " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); throw new OMException("Open Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } // current version not committed, so new blocks coming now are added to @@ -401,6 +412,9 @@ public EncryptedKeyVersion run() throws IOException { @Override public OpenKeySession openKey(OmKeyArgs args) throws IOException { Preconditions.checkNotNull(args); + Preconditions.checkNotNull(args.getAcls(), "Default acls " + + "should be set."); + String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); @@ -597,7 +611,7 @@ private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, ReplicationFactor factor, ReplicationType type, long size, FileEncryptionInfo encInfo) { - return new OmKeyInfo.Builder() + OmKeyInfo.Builder builder = new OmKeyInfo.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) @@ -608,8 +622,12 @@ private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, .setDataSize(size) .setReplicationType(type) .setReplicationFactor(factor) - .setFileEncryptionInfo(encInfo) - .build(); + .setFileEncryptionInfo(encInfo); + if(keyArgs.getAcls() != null) { + builder.setAcls(keyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + return builder.build(); } @Override @@ -628,7 +646,7 @@ public void commitKey(OmKeyArgs args, long clientID) throws IOException { OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey); if (keyInfo == null) { throw new OMException("Commit a key without corresponding entry " + - objectKey, ResultCodes.KEY_NOT_FOUND); + objectKey, KEY_NOT_FOUND); } keyInfo.setDataSize(args.getDataSize()); keyInfo.setModificationTime(Time.now()); @@ -670,7 +688,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, bucketName, keyName); throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } if (grpcBlockTokenEnabled) { String remoteUser = getRemoteUser().getShortUserName(); @@ -715,7 +733,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { LOG.debug("Get key failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex); throw 
new OMException(ex.getMessage(), - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } finally { metadataManager.getLock().releaseBucketLock(volumeName, bucketName); } @@ -748,7 +766,7 @@ public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { + "Key: {} not found.", volumeName, bucketName, fromKeyName, toKeyName, fromKeyName); throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } // A rename is a no-op if the target and source name is same. @@ -805,7 +823,7 @@ public void deleteKey(OmKeyArgs args) throws IOException { OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); if (keyInfo == null) { throw new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); + KEY_NOT_FOUND); } else { // directly delete key with no blocks from db. This key need not be // moved to deleted table. @@ -937,6 +955,8 @@ public OmMultipartInfo applyInitiateMultipartUpload(OmKeyArgs keyArgs, .setReplicationFactor(keyArgs.getFactor()) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, locations))) + .setAcls(keyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); DBStore store = metadataManager.getStore(); try (BatchOperation batch = store.initBatchOperation()) { @@ -1170,13 +1190,13 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setDataSize(size) .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) - .build(); + .setAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())).build(); } else { // Already a version exists, so we should add it as a new version. // But now as versioning is not supported, just following the commit - // key approach. - // When versioning support comes, then we can uncomment below code - // keyInfo.addNewVersion(locations); + // key approach. When versioning support comes, then we can uncomment + // below code keyInfo.addNewVersion(locations); keyInfo.updateLocationInfoList(locations); } DBStore store = metadataManager.getStore(); @@ -1345,6 +1365,305 @@ public OmMultipartUploadListParts listParts(String volumeName, } } + /** + * Add acl for Ozone object. Return true if acl is added successfully else + * false. + * + * @param obj Ozone object for which acl should be added. + * @param acl ozone acl top be added. + * @throws IOException if there is error. + */ + @Override + public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. 
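The addAcl implementation that begins here, and the removeAcl/setAcl/getAcl methods after it, share one skeleton: take the bucket lock, look the key up first in the key table and then in the open-key table, rewrite the entry, and release the lock in a finally block. A compact stand-alone rendering of that skeleton, with HashMaps and a ReentrantLock standing in for the OM tables and lock manager:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

final class KeyAclSkeleton {
  private final ReentrantLock bucketLock = new ReentrantLock();      // stand-in for the OM lock manager
  private final Map<String, String> keyTable = new HashMap<>();      // committed keys
  private final Map<String, String> openKeyTable = new HashMap<>();  // keys still being written

  String lookupForAclUpdate(String objectKey) throws IOException {
    bucketLock.lock();
    try {
      String info = keyTable.get(objectKey);
      if (info == null) {
        info = openKeyTable.get(objectKey);        // open keys get their ACLs updated too
        if (info == null) {
          throw new FileNotFoundException("Key not found. Key:" + objectKey);
        }
      }
      return info;                                  // caller rebuilds the entry and writes it back under the lock
    } finally {
      bucketLock.unlock();                          // always released, even on the not-found path
    }
  }
}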
Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + List newAcls = new ArrayList<>(keyInfo.getAcls()); + OzoneAclInfo newAcl = null; + for(OzoneAclInfo a: keyInfo.getAcls()) { + if(a.getName().equals(acl.getName())) { + List rights = + new ArrayList<>(a.getRightsList()); + for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) { + rights.add(OzoneAclRights.valueOf(aclType.name())); + } + newAcl = OzoneAclInfo.newBuilder() + .setType(a.getType()) + .setName(a.getName()) + .addAllRights(rights) + .build(); + newAcls.remove(a); + newAcls.add(newAcl); + break; + } + } + if(newAcl == null) { + newAcls.add(OzoneAcl.toProtobuf(acl)); + } + + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Add acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Remove acl for Ozone object. Return true if acl is removed successfully + * else false. + * + * @param obj Ozone object. + * @param acl Ozone acl to be removed. + * @throws IOException if there is error. + */ + @Override + public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + + List newAcls = new ArrayList<>(keyInfo.getAcls()); + OzoneAclInfo newAcl = OzoneAcl.toProtobuf(acl); + + if(newAcls.contains(OzoneAcl.toProtobuf(acl))) { + newAcls.remove(newAcl); + } else { + // Acl to be removed might be a subset of existing acls. 
+ for(OzoneAclInfo a: keyInfo.getAcls()) { + if(a.getName().equals(acl.getName())) { + List rights = + new ArrayList<>(a.getRightsList()); + for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) { + rights.remove(OzoneAclRights.valueOf(aclType.name())); + } + newAcl = OzoneAclInfo.newBuilder() + .setType(a.getType()) + .setName(a.getName()) + .addAllRights(rights) + .build(); + newAcls.remove(a); + newAcls.add(newAcl); + break; + } + } + if(newAcl == null) { + newAcls.add(OzoneAcl.toProtobuf(acl)); + } + } + + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Remove acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Acls to be set for given Ozone object. This operations reset ACL for given + * object to list of ACLs provided in argument. + * + * @param obj Ozone object. + * @param acls List of acls. + * @throws IOException if there is error. + */ + @Override + public boolean setAcl(OzoneObj obj, List acls) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + Table keyTable; + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + keyTable = metadataManager.getOpenKeyTable(); + } else { + keyTable = metadataManager.getKeyTable(); + } + + List newAcls = new ArrayList<>(); + for (OzoneAcl a : acls) { + newAcls.add(OzoneAcl.toProtobuf(a)); + } + OmKeyInfo newObj = new OmKeyInfo.Builder() + .setBucketName(keyInfo.getBucketName()) + .setKeyName(keyInfo.getKeyName()) + .setReplicationFactor(keyInfo.getFactor()) + .setReplicationType(keyInfo.getType()) + .setVolumeName(keyInfo.getVolumeName()) + .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) + .setCreationTime(keyInfo.getCreationTime()) + .setModificationTime(keyInfo.getModificationTime()) + .setAcls(newAcls) + .setDataSize(keyInfo.getDataSize()) + .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) + .build(); + + keyTable.put(objectKey, newObj); + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Set acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + return true; + } + + /** + * Returns list of ACLs for given Ozone object. + * + * @param obj Ozone object. + * @throws IOException if there is error. 
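removeAcl covers two cases: the exact acl is stored and is simply removed, or only a subset of its rights is held and those rights are subtracted from the matching entry. The sketch below illustrates the subtraction with a hypothetical Right enum; note that the bucket-side code earlier drops an entry whose bit set reaches zero, while the key-side code above keeps an entry with an empty rights list, so the empty Optional here follows the bucket behaviour.

import java.util.EnumSet;
import java.util.Optional;

final class RightsSubtraction {
  enum Right { READ, WRITE, DELETE }   // hypothetical stand-in for OzoneAclRights

  /** Removes 'requested' rights from 'existing'; an empty result means no rights remain. */
  static Optional<EnumSet<Right>> subtract(EnumSet<Right> existing, EnumSet<Right> requested) {
    EnumSet<Right> remaining = EnumSet.copyOf(existing);
    remaining.removeAll(requested);                  // subtract whatever overlaps
    return remaining.isEmpty() ? Optional.empty()    // entry could be dropped entirely
                               : Optional.of(remaining);
  }

  public static void main(String[] args) {
    System.out.println(subtract(EnumSet.of(Right.READ, Right.WRITE), EnumSet.of(Right.WRITE)));
    // -> Optional[[READ]]
    System.out.println(subtract(EnumSet.of(Right.WRITE), EnumSet.of(Right.WRITE)));
    // -> Optional.empty
  }
}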
+ */ + @Override + public List getAcl(OzoneObj obj) throws IOException { + validateOzoneObj(obj); + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + metadataManager.getLock().acquireBucketLock(volume, bucket); + try { + validateBucket(volume, bucket); + String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); + OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); + if (keyInfo == null) { + keyInfo = metadataManager.getOpenKeyTable().get(objectKey); + if (keyInfo == null) { + throw new OMException("Key not found. Key:" + + objectKey, KEY_NOT_FOUND); + } + } + + List acls = new ArrayList<>(); + for (OzoneAclInfo a : keyInfo.getAcls()) { + acls.add(OzoneAcl.fromProtobuf(a)); + } + return acls; + } catch (IOException ex) { + if (!(ex instanceof OMException)) { + LOG.error("Get acl operation failed for key:{}/{}/{}", volume, + bucket, keyName, ex); + } + throw ex; + } finally { + metadataManager.getLock().releaseBucketLock(volume, bucket); + } + } + + /** + * Helper method to validate ozone object. + * @param obj + * */ + private void validateOzoneObj(OzoneObj obj) throws OMException { + Objects.requireNonNull(obj); + + if (!obj.getResourceType().equals(KEY)) { + throw new IllegalArgumentException("Unexpected argument passed to " + + "KeyManager. OzoneObj type:" + obj.getResourceType()); + } + String volume = obj.getVolumeName(); + String bucket = obj.getBucketName(); + String keyName = obj.getKeyName(); + + if (Strings.isNullOrEmpty(volume)) { + throw new OMException("Volume name is required.", VOLUME_NOT_FOUND); + } + if (Strings.isNullOrEmpty(bucket)) { + throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND); + } + if (Strings.isNullOrEmpty(keyName)) { + throw new OMException("Key name is required.", KEY_NOT_FOUND); + } + } + /** * OzoneFS api to get file status for an entry. 
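validateOzoneObj is the guard that each of the four ACL methods calls first: the object must be KEY-typed and the volume, bucket and key names must all be non-empty. A plain-Java equivalent without Guava, using a stand-in enum and IllegalArgumentException where the OM code throws OMException:

import java.util.Objects;

final class KeyObjValidation {
  enum ResourceType { VOLUME, BUCKET, KEY }   // stand-in for OzoneObj.ResourceType

  static void validate(ResourceType type, String volume, String bucket, String key) {
    Objects.requireNonNull(type, "object type is required");
    if (type != ResourceType.KEY) {
      throw new IllegalArgumentException("Unexpected argument passed to KeyManager. "
          + "OzoneObj type:" + type);
    }
    requireNonEmpty(volume, "Volume name is required.");
    requireNonEmpty(bucket, "Bucket name is required.");
    requireNonEmpty(key, "Key name is required.");
  }

  private static void requireNonEmpty(String value, String message) {
    if (value == null || value.isEmpty()) {
      throw new IllegalArgumentException(message);   // the OM code throws OMException instead
    }
  }
}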
* @@ -1435,7 +1754,7 @@ public void createDirectory(OmKeyArgs args) throws IOException { return; } OmKeyInfo dirDbKeyInfo = - createDirectoryKey(volumeName, bucketName, keyName); + createDirectoryKey(volumeName, bucketName, keyName, args.getAcls()); String dirDbKey = metadataManager .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName()); metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo); @@ -1445,7 +1764,7 @@ public void createDirectory(OmKeyArgs args) throws IOException { } private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, - String keyName) throws IOException { + String keyName, List acls) throws IOException { // verify bucket exists OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName); @@ -1463,6 +1782,8 @@ private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, .setReplicationType(ReplicationType.RATIS) .setReplicationFactor(ReplicationFactor.ONE) .setFileEncryptionInfo(encInfo) + .setAcls(acls.stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index a89e4b29c6709..94b8b8c93a628 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2943,9 +2943,10 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.addAcl(obj, acl); - case BUCKET: return bucketManager.addAcl(obj, acl); + case KEY: + return keyManager.addAcl(obj, acl); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -2973,6 +2974,8 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { case BUCKET: return bucketManager.removeAcl(obj, acl); + case KEY: + return keyManager.removeAcl(obj, acl); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -2997,9 +3000,10 @@ public boolean setAcl(OzoneObj obj, List acls) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.setAcl(obj, acls); - case BUCKET: return bucketManager.setAcl(obj, acls); + case KEY: + return keyManager.setAcl(obj, acls); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); @@ -3022,9 +3026,10 @@ public List getAcl(OzoneObj obj) throws IOException { switch (obj.getResourceType()) { case VOLUME: return volumeManager.getAcl(obj); - case BUCKET: return bucketManager.getAcl(obj); + case KEY: + return keyManager.getAcl(obj); default: throw new OMException("Unexpected resource type: " + obj.getResourceType(), INVALID_REQUEST); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 9519f770c662c..6ff289a81bbe9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -542,7 +542,7 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.addAcl(acl); } catch (OMException ex) { - LOG.info("Add acl failed.", ex); + LOG.debug("Add acl 
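With KeyManager implementing the ACL operations, OzoneManager can route KEY-typed requests alongside the existing VOLUME and BUCKET cases. The dispatch is a switch over the resource type with a throwing default, as in this self-contained sketch (Handler is a hypothetical stand-in for the three managers):

import java.util.List;

final class AclDispatch {
  enum ResourceType { VOLUME, BUCKET, KEY, PREFIX }   // stand-in for OzoneObj.ResourceType

  interface Handler { List<String> getAcl(String path); }  // stand-in for volume/bucket/key managers

  static List<String> getAcl(ResourceType type, String path,
      Handler volumes, Handler buckets, Handler keys) {
    switch (type) {
    case VOLUME:
      return volumes.getAcl(path);
    case BUCKET:
      return buckets.getAcl(path);
    case KEY:
      return keys.getAcl(path);            // the case this patch adds
    default:
      throw new IllegalArgumentException("Unexpected resource type: " + type);
    }
  }
}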
failed.", ex); return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -592,7 +592,7 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.removeAcl(acl); } catch (OMException ex) { - LOG.info("Remove acl failed.", ex); + LOG.debug("Remove acl failed.", ex); return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index d78cc66e37aa2..58ab181402dc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -170,6 +170,8 @@ private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) omResponse.setMessage(stateMachineException.getCause().getMessage()); omResponse.setStatus(parseErrorStatus( stateMachineException.getCause().getMessage())); + LOG.debug("Error while executing ratis request. " + + "stateMachineException: ", stateMachineException); return omResponse.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index b82265024a3c7..568262f55548b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -576,6 +576,8 @@ private CreateKeyResponse createKey(CreateKeyRequest request) .setIsMultipartKey(keyArgs.getIsMultipartKey()) .setMultipartUploadID(keyArgs.getMultipartUploadID()) .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); if (keyArgs.hasDataSize()) { omKeyArgs.setDataSize(keyArgs.getDataSize()); @@ -825,6 +827,8 @@ private MultipartInfoInitiateResponse initiateMultiPartUpload( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setType(keyArgs.getType()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setFactor(keyArgs.getFactor()) .build(); OmMultipartInfo multipartInfo = impl.initiateMultipartUpload(omKeyArgs); @@ -847,6 +851,8 @@ private MultipartInfoInitiateResponse applyInitiateMultiPartUpload( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setType(keyArgs.getType()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setFactor(keyArgs.getFactor()) .build(); OmMultipartInfo multipartInfo = @@ -905,6 +911,8 @@ private MultipartUploadCompleteResponse completeMultipartUpload( .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .setMultipartUploadID(keyArgs.getMultipartUploadID()) .build(); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl @@ -1050,6 +1058,8 @@ private void createDirectory(CreateDirectoryRequest request) 
.setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); impl.createDirectory(omKeyArgs); } @@ -1064,6 +1074,8 @@ private CreateFileResponse createFile( .setDataSize(keyArgs.getDataSize()) .setType(keyArgs.getType()) .setFactor(keyArgs.getFactor()) + .setAcls(keyArgs.getAclsList().stream().map(a -> + OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) .build(); OpenKeySession keySession = impl.createFile(omKeyArgs, request.getIsOverwrite(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java index ab846b85f8a59..357feeb7b1add 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java @@ -22,6 +22,7 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -192,6 +193,7 @@ private void createAndDeleteKeys(KeyManager keyManager, int keyCount, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) + .setAcls(Collections.emptyList()) .setLocationInfoList(new ArrayList<>()) .build(); //Open, Commit and Delete the Keys in the Key Manager. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index ad2b2b196e3f9..fb323fe0c63e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -49,6 +49,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; @@ -60,6 +62,7 @@ import org.mockito.Mockito; import static org.apache.hadoop.ozone.OzoneConfigKeys.*; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; /** * Test class for @{@link KeyManagerImpl}. 
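On the server side the request handler performs the inverse of the client-side conversion: protobuf acl entries are mapped back to model objects with the same stream-and-collect idiom before OmKeyArgs is rebuilt. A self-contained illustration of that round trip with hypothetical Model and Wire classes:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

final class AclRoundTripConversion {
  static final class Model { final String spec; Model(String s) { this.spec = s; } }
  static final class Wire  { final String spec; Wire(String s)  { this.spec = s; } }

  static Wire toWire(Model m)   { return new Wire(m.spec); }   // stand-in for OzoneAcl.toProtobuf
  static Model fromWire(Wire w) { return new Model(w.spec); }  // stand-in for OzoneAcl.fromProtobuf

  public static void main(String[] args) {
    List<Model> sent = Arrays.asList(new Model("user:hadoop:rw"));
    // Client/KeyManager side: model -> wire.
    List<Wire> onTheWire = sent.stream().map(AclRoundTripConversion::toWire)
        .collect(Collectors.toList());
    // Request handler side: wire -> model, before rebuilding OmKeyArgs.
    List<Model> received = onTheWire.stream().map(AclRoundTripConversion::fromWire)
        .collect(Collectors.toList());
    System.out.println(received.get(0).spec);
  }
}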
@@ -173,11 +176,14 @@ public void allocateBlockFailureInSafeMode() throws Exception { @Test public void openKeyFailureInSafeMode() throws Exception { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, metadataManager, conf, "om1", null); OmKeyArgs keyArgs = createBuilder() .setKeyName(KEY_NAME) .setDataSize(1000) + .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + ALL, ALL)) .build(); LambdaTestUtils.intercept(OMException.class, "SafeModePrecheck failed for allocateBlock", () -> { @@ -355,7 +361,7 @@ public void testLookupFile() throws IOException { } } - private OmKeyArgs createKeyArgs(String toKeyName) { + private OmKeyArgs createKeyArgs(String toKeyName) throws IOException { return createBuilder().setKeyName(toKeyName).build(); } @@ -542,12 +548,15 @@ private List createFiles(String parent, return keyNames; } - private OmKeyArgs.Builder createBuilder() { + private OmKeyArgs.Builder createBuilder() throws IOException { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); return new OmKeyArgs.Builder() .setBucketName(BUCKET_NAME) .setFactor(ReplicationFactor.ONE) .setDataSize(0) .setType(ReplicationType.STAND_ALONE) + .setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), + ALL, ALL)) .setVolumeName(VOLUME_NAME); } } \ No newline at end of file
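Because OmKeyArgs now carries ACLs, the tests have to supply them explicitly, and they do so by deriving a default list from the current UserGroupInformation. A minimal helper in the same spirit; it assumes the OzoneUtils.getAclList helper introduced by this change returns a List of OzoneAcl, and grants ALL rights to both the user and their groups, as the test does:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;

import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;

final class TestAclDefaults {
  /** Default ACLs for whoever runs the test: full rights for the user and their groups. */
  static List<OzoneAcl> currentUserAcls() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(), ALL, ALL);
  }
}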