diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 32f175a88f735..50cc57447f92b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -61,6 +61,7 @@ import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; import org.apache.hadoop.fs.azurebfs.services.KeyProvider; import org.apache.hadoop.fs.azurebfs.services.SimpleKeyProvider; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.ReflectionUtils; @@ -68,6 +69,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.*; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.*; @@ -270,6 +272,10 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_VALUE_UNKNOWN) private String clusterType; + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_CLIENT_CORRELATIONID, + DefaultValue = EMPTY_STRING) + private String clientCorrelationId; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ENABLE_DELEGATION_TOKEN, DefaultValue = DEFAULT_ENABLE_DELEGATION_TOKEN) private boolean enableDelegationToken; @@ -338,6 +344,14 @@ public String getAccountName() { return accountName; } + /** + * Gets client correlation ID provided in config. + * @return Client Correlation ID config + */ + public String getClientCorrelationId() { + return clientCorrelationId; + } + /** * Appends an account name to a configuration key yielding the * account-specific form. 
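The hunks above wire a new `fs.azure.client.correlationid` setting into `AbfsConfiguration` through the annotated `clientCorrelationId` field and expose it via `getClientCorrelationId()`. A minimal sketch of how a caller might supply the ID is shown below; the account/container URI and the job ID are placeholders, and, per the config documentation later in this patch, an ID that fails the length or character validation is replaced with an empty string:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CorrelationIdExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Client-chosen correlation ID; values violating the length or
    // character constraints are validated down to an empty string.
    conf.set("fs.azure.client.correlationid", "my-job-run-42");
    // Optional: choose the x-ms-client-request-id layout by enum name.
    conf.set("fs.azure.tracingheader.format", "ALL_ID_FORMAT");
    // Placeholder abfs URI; substitute a real account and container.
    try (FileSystem fs = FileSystem.get(
        new URI("abfs://container@account.dfs.core.windows.net/"), conf)) {
      fs.getFileStatus(new Path("/")); // each request carries the header
    }
  }
}
```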
@@ -728,6 +742,14 @@ public DelegatingSSLSocketFactory.SSLChannelMode getPreferredSSLFactoryOption() return getEnum(FS_AZURE_SSL_CHANNEL_MODE_KEY, DEFAULT_FS_AZURE_SSL_CHANNEL_MODE); } + /** + * Enum config allowing the user to pick the format of the x-ms-client-request-id header. + * @return tracingHeaderFormat config if valid, else default ALL_ID_FORMAT + */ + public TracingHeaderFormat getTracingHeaderFormat() { + return getEnum(FS_AZURE_TRACINGHEADER_FORMAT, TracingHeaderFormat.ALL_ID_FORMAT); + } + public AuthType getAuthType(String accountName) { return getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index b1d2a333bdf9d..a8bf7c16eecfd 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -34,6 +34,7 @@ import java.util.EnumSet; import java.util.Map; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -68,6 +69,7 @@ import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations; import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.FileSystemOperationUnhandledException; @@ -77,6 +79,9 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; +import org.apache.hadoop.fs.azurebfs.utils.Listener; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat; import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; @@ -111,10 +116,14 @@ public class AzureBlobFileSystem extends FileSystem private Path workingDir; private AzureBlobFileSystemStore abfsStore; private boolean isClosed; + private final String fileSystemId = UUID.randomUUID().toString(); private boolean delegationTokenEnabled = false; private AbfsDelegationTokenManager delegationTokenManager; private AbfsCounters abfsCounters; + private String clientCorrelationId; + private TracingHeaderFormat tracingHeaderFormat; + private Listener listener; @Override public void initialize(URI uri, Configuration configuration) @@ -131,13 +140,19 @@ public void initialize(URI uri, Configuration configuration) configuration, abfsCounters); LOG.trace("AzureBlobFileSystemStore init complete"); - final AbfsConfiguration abfsConfiguration = abfsStore.getAbfsConfiguration(); + final AbfsConfiguration abfsConfiguration = abfsStore + .getAbfsConfiguration(); + clientCorrelationId = TracingContext.validateClientCorrelationID( + abfsConfiguration.getClientCorrelationId()); + tracingHeaderFormat = abfsConfiguration.getTracingHeaderFormat();
this.setWorkingDirectory(this.getHomeDirectory()); if (abfsConfiguration.getCreateRemoteFileSystemDuringInitialization()) { - if (this.tryGetFileStatus(new Path(AbfsHttpConstants.ROOT_PATH)) == null) { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.CREATE_FILESYSTEM, tracingHeaderFormat, listener); + if (this.tryGetFileStatus(new Path(AbfsHttpConstants.ROOT_PATH), tracingContext) == null) { try { - this.createFileSystem(); + this.createFileSystem(tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(null, ex, AzureServiceErrorCode.FILE_SYSTEM_ALREADY_EXISTS); } @@ -181,6 +196,10 @@ public URI getUri() { return this.uri; } + public void registerListener(Listener listener1) { + listener = listener1; + } + @Override public FSDataInputStream open(final Path path, final int bufferSize) throws IOException { LOG.debug("AzureBlobFileSystem.open path: {} bufferSize: {}", path, bufferSize); @@ -194,8 +213,11 @@ private FSDataInputStream open(final Path path, Path qualifiedPath = makeQualified(path); try { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.OPEN, tracingHeaderFormat, + listener); InputStream inputStream = abfsStore.openFileForRead(qualifiedPath, - options, statistics); + options, statistics, tracingContext); return new FSDataInputStream(inputStream); } catch(AzureBlobFileSystemException ex) { checkException(path, ex); @@ -231,8 +253,11 @@ public FSDataOutputStream create(final Path f, final FsPermission permission, fi Path qualifiedPath = makeQualified(f); try { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.CREATE, overwrite, tracingHeaderFormat, listener); OutputStream outputStream = abfsStore.createFile(qualifiedPath, statistics, overwrite, - permission == null ? FsPermission.getFileDefault() : permission, FsPermission.getUMask(getConf())); + permission == null ? 
FsPermission.getFileDefault() : permission, + FsPermission.getUMask(getConf()), tracingContext); statIncrement(FILES_CREATED); return new FSDataOutputStream(outputStream, statistics); } catch(AzureBlobFileSystemException ex) { @@ -249,7 +274,10 @@ public FSDataOutputStream createNonRecursive(final Path f, final FsPermission pe statIncrement(CALL_CREATE_NON_RECURSIVE); final Path parent = f.getParent(); - final FileStatus parentFileStatus = tryGetFileStatus(parent); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.CREATE_NON_RECURSIVE, tracingHeaderFormat, + listener); + final FileStatus parentFileStatus = tryGetFileStatus(parent, tracingContext); if (parentFileStatus == null) { throw new FileNotFoundException("Cannot create file " @@ -295,7 +323,11 @@ public FSDataOutputStream append(final Path f, final int bufferSize, final Progr Path qualifiedPath = makeQualified(f); try { - OutputStream outputStream = abfsStore.openFileForWrite(qualifiedPath, statistics, false); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.APPEND, tracingHeaderFormat, + listener); + OutputStream outputStream = abfsStore + .openFileForWrite(qualifiedPath, statistics, false, tracingContext); return new FSDataOutputStream(outputStream, statistics); } catch(AzureBlobFileSystemException ex) { checkException(f, ex); @@ -316,9 +348,12 @@ public boolean rename(final Path src, final Path dst) throws IOException { Path qualifiedSrcPath = makeQualified(src); Path qualifiedDstPath = makeQualified(dst); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.RENAME, true, tracingHeaderFormat, + listener); // rename under same folder; if(makeQualified(parentFolder).equals(qualifiedDstPath)) { - return tryGetFileStatus(qualifiedSrcPath) != null; + return tryGetFileStatus(qualifiedSrcPath, tracingContext) != null; } FileStatus dstFileStatus = null; @@ -327,7 +362,7 @@ public boolean rename(final Path src, final Path dst) throws IOException { // - if it doesn't exist, return false // - if it is file, return true // - if it is dir, return false. - dstFileStatus = tryGetFileStatus(qualifiedDstPath); + dstFileStatus = tryGetFileStatus(qualifiedDstPath, tracingContext); if (dstFileStatus == null) { return false; } @@ -335,8 +370,8 @@ public boolean rename(final Path src, final Path dst) throws IOException { } // Non-HNS account need to check dst status on driver side. - if (!abfsStore.getIsNamespaceEnabled() && dstFileStatus == null) { - dstFileStatus = tryGetFileStatus(qualifiedDstPath); + if (!abfsStore.getIsNamespaceEnabled(tracingContext) && dstFileStatus == null) { + dstFileStatus = tryGetFileStatus(qualifiedDstPath, tracingContext); } try { @@ -352,7 +387,7 @@ public boolean rename(final Path src, final Path dst) throws IOException { qualifiedDstPath = makeQualified(adjustedDst); - abfsStore.rename(qualifiedSrcPath, qualifiedDstPath); + abfsStore.rename(qualifiedSrcPath, qualifiedDstPath, tracingContext); return true; } catch(AzureBlobFileSystemException ex) { LOG.debug("Rename operation failed. 
", ex); @@ -386,7 +421,10 @@ public boolean delete(final Path f, final boolean recursive) throws IOException } try { - abfsStore.delete(qualifiedPath, recursive); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.DELETE, tracingHeaderFormat, + listener); + abfsStore.delete(qualifiedPath, recursive, tracingContext); return true; } catch (AzureBlobFileSystemException ex) { checkException(f, ex, AzureServiceErrorCode.PATH_NOT_FOUND); @@ -403,7 +441,10 @@ public FileStatus[] listStatus(final Path f) throws IOException { Path qualifiedPath = makeQualified(f); try { - FileStatus[] result = abfsStore.listStatus(qualifiedPath); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.LISTSTATUS, true, tracingHeaderFormat, + listener); + FileStatus[] result = abfsStore.listStatus(qualifiedPath, tracingContext); return result; } catch (AzureBlobFileSystemException ex) { checkException(f, ex); @@ -472,8 +513,12 @@ public boolean mkdirs(final Path f, final FsPermission permission) throws IOExce Path qualifiedPath = makeQualified(f); try { - abfsStore.createDirectory(qualifiedPath, permission == null ? FsPermission.getDirDefault() : permission, - FsPermission.getUMask(getConf())); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.MKDIR, false, tracingHeaderFormat, + listener); + abfsStore.createDirectory(qualifiedPath, + permission == null ? FsPermission.getDirDefault() : permission, + FsPermission.getUMask(getConf()), tracingContext); statIncrement(DIRECTORIES_CREATED); return true; } catch (AzureBlobFileSystemException ex) { @@ -505,14 +550,22 @@ public synchronized void close() throws IOException { @Override public FileStatus getFileStatus(final Path f) throws IOException { - LOG.debug("AzureBlobFileSystem.getFileStatus path: {}", f); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.GET_FILESTATUS, tracingHeaderFormat, + listener); + return getFileStatus(f, tracingContext); + } + + private FileStatus getFileStatus(final Path path, + TracingContext tracingContext) throws IOException { + LOG.debug("AzureBlobFileSystem.getFileStatus path: {}", path); statIncrement(CALL_GET_FILE_STATUS); - Path qualifiedPath = makeQualified(f); + Path qualifiedPath = makeQualified(path); try { - return abfsStore.getFileStatus(qualifiedPath); + return abfsStore.getFileStatus(qualifiedPath, tracingContext); } catch(AzureBlobFileSystemException ex) { - checkException(f, ex); + checkException(path, ex); return null; } } @@ -531,7 +584,10 @@ public void breakLease(final Path f) throws IOException { try (DurationInfo ignored = new DurationInfo(LOG, false, "Break lease for %s", qualifiedPath)) { - abfsStore.breakLease(qualifiedPath); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.BREAK_LEASE, tracingHeaderFormat, + listener); + abfsStore.breakLease(qualifiedPath, tracingContext); } catch(AzureBlobFileSystemException ex) { checkException(f, ex); } @@ -704,7 +760,11 @@ public void setOwner(final Path path, final String owner, final String group) throws IOException { LOG.debug( "AzureBlobFileSystem.setOwner path: {}", path); - if (!getIsNamespaceEnabled()) { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.SET_OWNER, true, tracingHeaderFormat, + listener); + + if 
(!getIsNamespaceEnabled(tracingContext)) { super.setOwner(path, owner, group); return; } @@ -718,7 +778,8 @@ public void setOwner(final Path path, final String owner, final String group) try { abfsStore.setOwner(qualifiedPath, owner, - group); + group, + tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -746,14 +807,18 @@ public void setXAttr(final Path path, final String name, final byte[] value, fin Path qualifiedPath = makeQualified(path); try { - Hashtable properties = abfsStore.getPathStatus(qualifiedPath); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.SET_ATTR, true, tracingHeaderFormat, + listener); + Hashtable properties = abfsStore + .getPathStatus(qualifiedPath, tracingContext); String xAttrName = ensureValidAttributeName(name); boolean xAttrExists = properties.containsKey(xAttrName); XAttrSetFlag.validate(name, xAttrExists, flag); String xAttrValue = abfsStore.decodeAttribute(value); properties.put(xAttrName, xAttrValue); - abfsStore.setPathProperties(qualifiedPath, properties); + abfsStore.setPathProperties(qualifiedPath, properties, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -782,7 +847,11 @@ public byte[] getXAttr(final Path path, final String name) byte[] value = null; try { - Hashtable properties = abfsStore.getPathStatus(qualifiedPath); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.GET_ATTR, true, tracingHeaderFormat, + listener); + Hashtable properties = abfsStore + .getPathStatus(qualifiedPath, tracingContext); String xAttrName = ensureValidAttributeName(name); if (properties.containsKey(xAttrName)) { String xAttrValue = properties.get(xAttrName); @@ -809,7 +878,10 @@ private static String ensureValidAttributeName(String attribute) { public void setPermission(final Path path, final FsPermission permission) throws IOException { LOG.debug("AzureBlobFileSystem.setPermission path: {}", path); - if (!getIsNamespaceEnabled()) { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.SET_PERMISSION, true, tracingHeaderFormat, listener); + + if (!getIsNamespaceEnabled(tracingContext)) { super.setPermission(path, permission); return; } @@ -821,8 +893,7 @@ public void setPermission(final Path path, final FsPermission permission) Path qualifiedPath = makeQualified(path); try { - abfsStore.setPermission(qualifiedPath, - permission); + abfsStore.setPermission(qualifiedPath, permission, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -842,8 +913,11 @@ public void setPermission(final Path path, final FsPermission permission) public void modifyAclEntries(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.modifyAclEntries path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.MODIFY_ACL, true, tracingHeaderFormat, + listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "modifyAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -856,8 +930,7 @@ public void modifyAclEntries(final Path path, final List aclSpec) Path qualifiedPath = makeQualified(path); try { - abfsStore.modifyAclEntries(qualifiedPath, - aclSpec); + abfsStore.modifyAclEntries(qualifiedPath, 
aclSpec, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -875,8 +948,11 @@ public void modifyAclEntries(final Path path, final List aclSpec) public void removeAclEntries(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.removeAclEntries path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.REMOVE_ACL_ENTRIES, true, + tracingHeaderFormat, listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -889,7 +965,7 @@ public void removeAclEntries(final Path path, final List aclSpec) Path qualifiedPath = makeQualified(path); try { - abfsStore.removeAclEntries(qualifiedPath, aclSpec); + abfsStore.removeAclEntries(qualifiedPath, aclSpec, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -904,8 +980,11 @@ public void removeAclEntries(final Path path, final List aclSpec) @Override public void removeDefaultAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeDefaultAcl path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.REMOVE_DEFAULT_ACL, true, + tracingHeaderFormat, listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeDefaultAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -914,7 +993,7 @@ public void removeDefaultAcl(final Path path) throws IOException { Path qualifiedPath = makeQualified(path); try { - abfsStore.removeDefaultAcl(qualifiedPath); + abfsStore.removeDefaultAcl(qualifiedPath, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -931,8 +1010,11 @@ public void removeDefaultAcl(final Path path) throws IOException { @Override public void removeAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeAcl path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.REMOVE_ACL, true, tracingHeaderFormat, + listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); @@ -941,7 +1023,7 @@ public void removeAcl(final Path path) throws IOException { Path qualifiedPath = makeQualified(path); try { - abfsStore.removeAcl(qualifiedPath); + abfsStore.removeAcl(qualifiedPath, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -961,8 +1043,11 @@ public void removeAcl(final Path path) throws IOException { public void setAcl(final Path path, final List aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.setAcl path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.SET_ACL, true, tracingHeaderFormat, + listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "setAcl is only supported by storage accounts with the hierarchical " + "namespace enabled."); @@ -975,7 +1060,7 @@ public void setAcl(final Path path, final List aclSpec) 
Path qualifiedPath = makeQualified(path); try { - abfsStore.setAcl(qualifiedPath, aclSpec); + abfsStore.setAcl(qualifiedPath, aclSpec, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -991,8 +1076,10 @@ public void setAcl(final Path path, final List aclSpec) @Override public AclStatus getAclStatus(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.GET_ACL_STATUS, true, tracingHeaderFormat, listener); - if (!getIsNamespaceEnabled()) { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "getAclStatus is only supported by storage account with the " + "hierarchical namespace enabled."); @@ -1001,7 +1088,7 @@ public AclStatus getAclStatus(final Path path) throws IOException { Path qualifiedPath = makeQualified(path); try { - return abfsStore.getAclStatus(qualifiedPath); + return abfsStore.getAclStatus(qualifiedPath, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); return null; @@ -1025,7 +1112,10 @@ public void access(final Path path, final FsAction mode) throws IOException { LOG.debug("AzureBlobFileSystem.access path : {}, mode : {}", path, mode); Path qualifiedPath = makeQualified(path); try { - this.abfsStore.access(qualifiedPath, mode); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.ACCESS, tracingHeaderFormat, + listener); + this.abfsStore.access(qualifiedPath, mode, tracingContext); } catch (AzureBlobFileSystemException ex) { checkCheckAccessException(path, ex); } @@ -1049,17 +1139,20 @@ public RemoteIterator listStatusIterator(Path path) throws IOException { LOG.debug("AzureBlobFileSystem.listStatusIterator path : {}", path); if (abfsStore.getAbfsConfiguration().enableAbfsListIterator()) { + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.LISTSTATUS, true, tracingHeaderFormat, listener); AbfsListStatusRemoteIterator abfsLsItr = - new AbfsListStatusRemoteIterator(getFileStatus(path), abfsStore); + new AbfsListStatusRemoteIterator(getFileStatus(path, tracingContext), abfsStore, + tracingContext); return RemoteIterators.typeCastingRemoteIterator(abfsLsItr); } else { return super.listStatusIterator(path); } } - private FileStatus tryGetFileStatus(final Path f) { + private FileStatus tryGetFileStatus(final Path f, TracingContext tracingContext) { try { - return getFileStatus(f); + return getFileStatus(f, tracingContext); } catch (IOException ex) { LOG.debug("File not found {}", f); statIncrement(ERROR_IGNORED); @@ -1071,7 +1164,9 @@ private boolean fileSystemExists() throws IOException { LOG.debug( "AzureBlobFileSystem.fileSystemExists uri: {}", uri); try { - abfsStore.getFilesystemProperties(); + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fileSystemId, FSOperationType.TEST_OP, tracingHeaderFormat, listener); + abfsStore.getFilesystemProperties(tracingContext); } catch (AzureBlobFileSystemException ex) { try { checkException(null, ex); @@ -1086,11 +1181,11 @@ private boolean fileSystemExists() throws IOException { return true; } - private void createFileSystem() throws IOException { + private void createFileSystem(TracingContext tracingContext) throws IOException { LOG.debug( "AzureBlobFileSystem.createFileSystem uri: {}", uri); try { - abfsStore.createFilesystem(); + 
abfsStore.createFilesystem(tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(null, ex); } @@ -1283,6 +1378,11 @@ FileSystem.Statistics getFsStatistics() { return this.statistics; } + @VisibleForTesting + void setListenerOperation(FSOperationType operation) { + listener.setOperation(operation); + } + @VisibleForTesting static class FileSystemOperation { private final T result; @@ -1318,8 +1418,9 @@ AbfsDelegationTokenManager getDelegationTokenManager() { } @VisibleForTesting - boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { - return abfsStore.getIsNamespaceEnabled(); + boolean getIsNamespaceEnabled(TracingContext tracingContext) + throws AzureBlobFileSystemException { + return abfsStore.getIsNamespaceEnabled(tracingContext); } /** @@ -1333,6 +1434,16 @@ Map getInstrumentationMap() { return abfsCounters.toMap(); } + @VisibleForTesting + String getFileSystemId() { + return fileSystemId; + } + + @VisibleForTesting + String getClientCorrelationId() { + return clientCorrelationId; + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { @@ -1343,7 +1454,10 @@ public boolean hasPathCapability(final Path path, final String capability) case CommonPathCapabilities.FS_APPEND: return true; case CommonPathCapabilities.FS_ACLS: - return getIsNamespaceEnabled(); + return getIsNamespaceEnabled( + new TracingContext(clientCorrelationId, fileSystemId, + FSOperationType.HAS_PATH_CAPABILITY, tracingHeaderFormat, + listener)); default: return super.hasPathCapability(p, capability); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index be9a9f265fe18..3a527f7f0c3c9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -113,6 +113,7 @@ import org.apache.hadoop.fs.azurebfs.utils.Base64; import org.apache.hadoop.fs.azurebfs.utils.CRC64; import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; @@ -310,7 +311,8 @@ private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, In return authorityParts; } - public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { + public boolean getIsNamespaceEnabled(TracingContext tracingContext) + throws AzureBlobFileSystemException { try { return this.isNamespaceEnabled.toBoolean(); } catch (TrileanConversionException e) { @@ -321,7 +323,8 @@ public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { LOG.debug("Get root ACL status"); try (AbfsPerfInfo perfInfo = startTracking("getIsNamespaceEnabled", "getAclStatus")) { - AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.ROOT_PATH); + AbfsRestOperation op = client + .getAclStatus(AbfsHttpConstants.ROOT_PATH, tracingContext); perfInfo.registerResult(op.getResult()); isNamespaceEnabled = Trilean.getTrilean(true); perfInfo.registerSuccess(true); @@ -374,7 +377,8 @@ public AbfsConfiguration getAbfsConfiguration() { return this.abfsConfiguration; } - public Hashtable getFilesystemProperties() throws AzureBlobFileSystemException { + 
public Hashtable getFilesystemProperties( + TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("getFilesystemProperties", "getFilesystemProperties")) { LOG.debug("getFilesystemProperties for filesystem: {}", @@ -382,7 +386,8 @@ public Hashtable getFilesystemProperties() throws AzureBlobFileS final Hashtable parsedXmsProperties; - final AbfsRestOperation op = client.getFilesystemProperties(); + final AbfsRestOperation op = client + .getFilesystemProperties(tracingContext); perfInfo.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); @@ -394,7 +399,8 @@ public Hashtable getFilesystemProperties() throws AzureBlobFileS } } - public void setFilesystemProperties(final Hashtable properties) + public void setFilesystemProperties( + final Hashtable properties, TracingContext tracingContext) throws AzureBlobFileSystemException { if (properties == null || properties.isEmpty()) { LOG.trace("setFilesystemProperties no properties present"); @@ -414,19 +420,22 @@ public void setFilesystemProperties(final Hashtable properties) throw new InvalidAbfsRestOperationException(ex); } - final AbfsRestOperation op = client.setFilesystemProperties(commaSeparatedProperties); + final AbfsRestOperation op = client + .setFilesystemProperties(commaSeparatedProperties, tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public Hashtable getPathStatus(final Path path) throws AzureBlobFileSystemException { + public Hashtable getPathStatus(final Path path, + TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("getPathStatus", "getPathStatus")){ LOG.debug("getPathStatus for filesystem: {} path: {}", client.getFileSystem(), path); final Hashtable parsedXmsProperties; - final AbfsRestOperation op = client.getPathStatus(getRelativePath(path), true); + final AbfsRestOperation op = client + .getPathStatus(getRelativePath(path), true, tracingContext); perfInfo.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); @@ -439,7 +448,9 @@ public Hashtable getPathStatus(final Path path) throws AzureBlob } } - public void setPathProperties(final Path path, final Hashtable properties) throws AzureBlobFileSystemException { + public void setPathProperties(final Path path, + final Hashtable properties, TracingContext tracingContext) + throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("setPathProperties", "setPathProperties")){ LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}", client.getFileSystem(), @@ -452,37 +463,41 @@ public void setPathProperties(final Path path, final Hashtable p } catch (CharacterCodingException ex) { throw new InvalidAbfsRestOperationException(ex); } - final AbfsRestOperation op = client.setPathProperties(getRelativePath(path), commaSeparatedProperties); + final AbfsRestOperation op = client + .setPathProperties(getRelativePath(path), commaSeparatedProperties, + tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public void createFilesystem() throws AzureBlobFileSystemException { + public void createFilesystem(TracingContext tracingContext) + throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("createFilesystem", "createFilesystem")){ LOG.debug("createFilesystem for 
filesystem: {}", client.getFileSystem()); - final AbfsRestOperation op = client.createFilesystem(); + final AbfsRestOperation op = client.createFilesystem(tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public void deleteFilesystem() throws AzureBlobFileSystemException { + public void deleteFilesystem(TracingContext tracingContext) + throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("deleteFilesystem", "deleteFilesystem")) { LOG.debug("deleteFilesystem for filesystem: {}", client.getFileSystem()); - final AbfsRestOperation op = client.deleteFilesystem(); + final AbfsRestOperation op = client.deleteFilesystem(tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public OutputStream createFile(final Path path, - final FileSystem.Statistics statistics, - final boolean overwrite, final FsPermission permission, - final FsPermission umask) throws AzureBlobFileSystemException { + final FileSystem.Statistics statistics, final boolean overwrite, + final FsPermission permission, final FsPermission umask, + TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("createFile", "createPath")) { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); + boolean isNamespaceEnabled = getIsNamespaceEnabled(tracingContext); LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), path, @@ -512,7 +527,8 @@ public OutputStream createFile(final Path path, statistics, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? getOctalNotation(umask) : null, - isAppendBlob + isAppendBlob, + tracingContext ); } else { @@ -521,18 +537,21 @@ public OutputStream createFile(final Path path, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? getOctalNotation(umask) : null, isAppendBlob, - null); + null, + tracingContext); + } perfInfo.registerResult(op.getResult()).registerSuccess(true); - AbfsLease lease = maybeCreateLease(relativePath); + AbfsLease lease = maybeCreateLease(relativePath, tracingContext); return new AbfsOutputStream( client, statistics, relativePath, 0, - populateAbfsOutputStreamContext(isAppendBlob, lease)); + populateAbfsOutputStreamContext(isAppendBlob, lease), + tracingContext); } } @@ -551,20 +570,22 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePa final FileSystem.Statistics statistics, final String permission, final String umask, - final boolean isAppendBlob) throws AzureBlobFileSystemException { + final boolean isAppendBlob, + TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsRestOperation op; try { // Trigger a create with overwrite=false first so that eTag fetch can be // avoided for cases when no pre-existing file is present (major portion // of create file traffic falls into the case of no pre-existing file). 
- op = client.createPath(relativePath, true, - false, permission, umask, isAppendBlob, null); + op = client.createPath(relativePath, true, false, permission, umask, + isAppendBlob, null, tracingContext); + } catch (AbfsRestOperationException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT) { // File pre-exists, fetch eTag try { - op = client.getPathStatus(relativePath, false); + op = client.getPathStatus(relativePath, false, tracingContext); } catch (AbfsRestOperationException ex) { if (ex.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { // Is a parallel access case, as file which was found to be @@ -582,8 +603,8 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePa try { // overwrite only if eTag matches with the file properties fetched before - op = client.createPath(relativePath, true, - true, permission, umask, isAppendBlob, eTag); + op = client.createPath(relativePath, true, true, permission, umask, + isAppendBlob, eTag, tracingContext); } catch (AbfsRestOperationException ex) { if (ex.getStatusCode() == HttpURLConnection.HTTP_PRECON_FAILED) { // Is a parallel access case, as file with eTag was just queried @@ -623,10 +644,11 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppend .build(); } - public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask) + public void createDirectory(final Path path, final FsPermission permission, + final FsPermission umask, TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("createDirectory", "createPath")) { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); + boolean isNamespaceEnabled = getIsNamespaceEnabled(tracingContext); LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), path, @@ -639,20 +661,21 @@ public void createDirectory(final Path path, final FsPermission permission, fina final AbfsRestOperation op = client.createPath(getRelativePath(path), false, overwrite, isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ?
getOctalNotation(umask) : null, false, null, + tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public AbfsInputStream openFileForRead(final Path path, - final FileSystem.Statistics statistics) + final FileSystem.Statistics statistics, TracingContext tracingContext) throws AzureBlobFileSystemException { - return openFileForRead(path, Optional.empty(), statistics); + return openFileForRead(path, Optional.empty(), statistics, tracingContext); } public AbfsInputStream openFileForRead(final Path path, final Optional options, - final FileSystem.Statistics statistics) + final FileSystem.Statistics statistics, TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("openFileForRead", "getPathStatus")) { LOG.debug("openFileForRead filesystem: {} path: {}", @@ -661,7 +684,8 @@ public AbfsInputStream openFileForRead(final Path path, String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getPathStatus(relativePath, false); + final AbfsRestOperation op = client + .getPathStatus(relativePath, false, tracingContext); perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); @@ -682,7 +706,7 @@ public AbfsInputStream openFileForRead(final Path path, return new AbfsInputStream(client, statistics, relativePath, contentLength, populateAbfsInputStreamContext(options), - eTag); + eTag, tracingContext); } } @@ -706,8 +730,9 @@ private AbfsInputStreamContext populateAbfsInputStreamContext( .build(); } - public OutputStream openFileForWrite(final Path path, final FileSystem.Statistics statistics, final boolean overwrite) throws - AzureBlobFileSystemException { + public OutputStream openFileForWrite(final Path path, + final FileSystem.Statistics statistics, final boolean overwrite, + TracingContext tracingContext) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("openFileForWrite", "getPathStatus")) { LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", client.getFileSystem(), @@ -716,7 +741,8 @@ public OutputStream openFileForWrite(final Path path, final FileSystem.Statistic String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getPathStatus(relativePath, false); + final AbfsRestOperation op = client + .getPathStatus(relativePath, false, tracingContext); perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); @@ -739,14 +765,15 @@ public OutputStream openFileForWrite(final Path path, final FileSystem.Statistic isAppendBlob = true; } - AbfsLease lease = maybeCreateLease(relativePath); + AbfsLease lease = maybeCreateLease(relativePath, tracingContext); return new AbfsOutputStream( client, statistics, relativePath, offset, - populateAbfsOutputStreamContext(isAppendBlob, lease)); + populateAbfsOutputStreamContext(isAppendBlob, lease), + tracingContext); } } @@ -754,15 +781,16 @@ public OutputStream openFileForWrite(final Path path, final FileSystem.Statistic * Break any current lease on an ABFS file. 
* * @param path file name + * @param tracingContext TracingContext instance to track correlation IDs * @throws AzureBlobFileSystemException on any exception while breaking the lease */ - public void breakLease(final Path path) throws AzureBlobFileSystemException { + public void breakLease(final Path path, final TracingContext tracingContext) throws AzureBlobFileSystemException { LOG.debug("lease path: {}", path); - client.breakLease(getRelativePath(path)); + client.breakLease(getRelativePath(path), tracingContext); } - public void rename(final Path source, final Path destination) throws + public void rename(final Path source, final Path destination, TracingContext tracingContext) throws AzureBlobFileSystemException { final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; @@ -785,8 +813,9 @@ public void rename(final Path source, final Path destination) throws do { try (AbfsPerfInfo perfInfo = startTracking("rename", "renamePath")) { - AbfsRestOperation op = client.renamePath(sourceRelativePath, - destinationRelativePath, continuation); + AbfsRestOperation op = client + .renamePath(sourceRelativePath, destinationRelativePath, + continuation, tracingContext); perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); perfInfo.registerSuccess(true); @@ -800,8 +829,8 @@ public void rename(final Path source, final Path destination) throws } while (shouldContinue); } - public void delete(final Path path, final boolean recursive) - throws AzureBlobFileSystemException { + public void delete(final Path path, final boolean recursive, + TracingContext tracingContext) throws AzureBlobFileSystemException { final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; boolean shouldContinue = true; @@ -817,8 +846,8 @@ public void delete(final Path path, final boolean recursive) do { try (AbfsPerfInfo perfInfo = startTracking("delete", "deletePath")) { - AbfsRestOperation op = client.deletePath( - relativePath, recursive, continuation); + AbfsRestOperation op = client + .deletePath(relativePath, recursive, continuation, tracingContext); perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); perfInfo.registerSuccess(true); @@ -832,9 +861,10 @@ public void delete(final Path path, final boolean recursive) } while (shouldContinue); } - public FileStatus getFileStatus(final Path path) throws IOException { + public FileStatus getFileStatus(final Path path, + TracingContext tracingContext) throws IOException { try (AbfsPerfInfo perfInfo = startTracking("getFileStatus", "undetermined")) { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); + boolean isNamespaceEnabled = getIsNamespaceEnabled(tracingContext); LOG.debug("getFileStatus filesystem: {} path: {} isNamespaceEnabled: {}", client.getFileSystem(), path, @@ -844,14 +874,14 @@ public FileStatus getFileStatus(final Path path) throws IOException { if (path.isRoot()) { if (isNamespaceEnabled) { perfInfo.registerCallee("getAclStatus"); - op = client.getAclStatus(getRelativePath(path)); + op = client.getAclStatus(getRelativePath(path), tracingContext); } else { perfInfo.registerCallee("getFilesystemProperties"); - op = client.getFilesystemProperties(); + op = client.getFilesystemProperties(tracingContext); } } else { perfInfo.registerCallee("getPathStatus"); - op = client.getPathStatus(getRelativePath(path), false); + op = 
client.getPathStatus(getRelativePath(path), false, tracingContext); } perfInfo.registerResult(op.getResult()); @@ -903,11 +933,12 @@ public FileStatus getFileStatus(final Path path) throws IOException { /** * @param path The list path. + * @param tracingContext Tracks identifiers for request header * @return the entries in the path. * */ @Override - public FileStatus[] listStatus(final Path path) throws IOException { - return listStatus(path, null); + public FileStatus[] listStatus(final Path path, TracingContext tracingContext) throws IOException { + return listStatus(path, null, tracingContext); } /** @@ -918,21 +949,21 @@ public FileStatus[] listStatus(final Path path) throws IOException { * Notice that if startFrom is a non-existent entry name, then the list response contains * all entries after this non-existent entry in lexical order: * listStatus(Path("/folder"), "cfile") will return "/folder/hfile" and "/folder/ifile". - * + * @param tracingContext Tracks identifiers for request header * @return the entries in the path start from "startFrom" in lexical order. * */ @InterfaceStability.Unstable @Override - public FileStatus[] listStatus(final Path path, final String startFrom) throws IOException { + public FileStatus[] listStatus(final Path path, final String startFrom, TracingContext tracingContext) throws IOException { List fileStatuses = new ArrayList<>(); - listStatus(path, startFrom, fileStatuses, true, null); + listStatus(path, startFrom, fileStatuses, true, null, tracingContext); return fileStatuses.toArray(new FileStatus[fileStatuses.size()]); } @Override public String listStatus(final Path path, final String startFrom, List fileStatuses, final boolean fetchAll, - String continuation) throws IOException { + String continuation, TracingContext tracingContext) throws IOException { final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; boolean shouldContinue = true; @@ -947,7 +978,7 @@ public String listStatus(final Path path, final String startFrom, if (continuation == null || continuation.isEmpty()) { // generate continuation token if a valid startFrom is provided. if (startFrom != null && !startFrom.isEmpty()) { - continuation = getIsNamespaceEnabled() + continuation = getIsNamespaceEnabled(tracingContext) ? 
generateContinuationTokenForXns(startFrom) : generateContinuationTokenForNonXns(relativePath, startFrom); } @@ -956,7 +987,8 @@ public String listStatus(final Path path, final String startFrom, do { try (AbfsPerfInfo perfInfo = startTracking("listStatus", "listPath")) { AbfsRestOperation op = client.listPath(relativePath, false, - abfsConfiguration.getListMaxResults(), continuation); + abfsConfiguration.getListMaxResults(), continuation, + tracingContext); perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); ListResultSchema retrievedSchema = op.getResult().getListResultSchema(); @@ -1074,9 +1106,9 @@ private String generateContinuationTokenForNonXns(String path, final String firs return encodedTokenBuilder.toString(); } - public void setOwner(final Path path, final String owner, final String group) throws - AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void setOwner(final Path path, final String owner, final String group, + TracingContext tracingContext) throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1095,15 +1127,16 @@ public void setOwner(final Path path, final String owner, final String group) th final AbfsRestOperation op = client.setOwner(getRelativePath(path), transformedOwner, - transformedGroup); + transformedGroup, + tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public void setPermission(final Path path, final FsPermission permission) throws - AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void setPermission(final Path path, final FsPermission permission, + TracingContext tracingContext) throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1117,15 +1150,16 @@ public void setPermission(final Path path, final FsPermission permission) throws permission); final AbfsRestOperation op = client.setPermission(getRelativePath(path), - String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); + String.format(AbfsHttpConstants.PERMISSION_FORMAT, + permission.toOctal()), tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public void modifyAclEntries(final Path path, final List aclSpec) throws - AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void modifyAclEntries(final Path path, final List aclSpec, + TracingContext tracingContext) throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1144,7 +1178,8 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(relativePath, useUpn); + final AbfsRestOperation op = client + .getAclStatus(relativePath, useUpn, tracingContext); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -1155,9 +1190,9 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro 
perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("modifyAclEntries", "setAcl")) { - final AbfsRestOperation setAclOp - = client.setAcl(relativePath, - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + final AbfsRestOperation setAclOp = client + .setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), + eTag, tracingContext); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); @@ -1165,8 +1200,9 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro } } - public void removeAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void removeAclEntries(final Path path, final List aclSpec, + TracingContext tracingContext) throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1185,7 +1221,8 @@ public void removeAclEntries(final Path path, final List aclSpec) thro String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(relativePath, isUpnFormat); + final AbfsRestOperation op = client + .getAclStatus(relativePath, isUpnFormat, tracingContext); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -1196,9 +1233,9 @@ public void removeAclEntries(final Path path, final List aclSpec) thro perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeAclEntries", "setAcl")) { - final AbfsRestOperation setAclOp = - client.setAcl(relativePath, - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + final AbfsRestOperation setAclOp = client + .setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), + eTag, tracingContext); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); @@ -1206,8 +1243,9 @@ public void removeAclEntries(final Path path, final List aclSpec) thro } } - public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void removeDefaultAcl(final Path path, TracingContext tracingContext) + throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1221,7 +1259,8 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(relativePath); + final AbfsRestOperation op = client + .getAclStatus(relativePath, tracingContext); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); @@ -1238,9 +1277,9 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeDefaultAcl", "setAcl")) { - final AbfsRestOperation setAclOp = - client.setAcl(relativePath, - 
AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + final AbfsRestOperation setAclOp = client + .setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), + eTag, tracingContext); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); @@ -1248,8 +1287,9 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio } } - public void removeAcl(final Path path) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void removeAcl(final Path path, TracingContext tracingContext) + throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1263,7 +1303,8 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(relativePath); + final AbfsRestOperation op = client + .getAclStatus(relativePath, tracingContext); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -1277,9 +1318,9 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeAcl", "setAcl")) { - final AbfsRestOperation setAclOp = - client.setAcl(relativePath, - AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); + final AbfsRestOperation setAclOp = client + .setAcl(relativePath, AbfsAclHelper.serializeAclSpec(newAclEntries), + eTag, tracingContext); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); @@ -1287,8 +1328,9 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { } } - public void setAcl(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { + public void setAcl(final Path path, final List aclSpec, + TracingContext tracingContext) throws AzureBlobFileSystemException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1307,7 +1349,8 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(relativePath, isUpnFormat); + final AbfsRestOperation op = client + .getAclStatus(relativePath, isUpnFormat, tracingContext); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -1320,7 +1363,7 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl try (AbfsPerfInfo perfInfoSet = startTracking("setAcl", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(relativePath, - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + AbfsAclHelper.serializeAclSpec(aclEntries), eTag, tracingContext); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); @@ -1328,8 +1371,9 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl } } - public AclStatus getAclStatus(final Path path) 
throws IOException { - if (!getIsNamespaceEnabled()) { + public AclStatus getAclStatus(final Path path, TracingContext tracingContext) + throws IOException { + if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } @@ -1341,7 +1385,8 @@ public AclStatus getAclStatus(final Path path) throws IOException { client.getFileSystem(), path); - AbfsRestOperation op = client.getAclStatus(getRelativePath(path)); + AbfsRestOperation op = client + .getAclStatus(getRelativePath(path), tracingContext); AbfsHttpOperation result = op.getResult(); perfInfo.registerResult(result); @@ -1374,19 +1419,19 @@ public AclStatus getAclStatus(final Path path) throws IOException { } } - public void access(final Path path, final FsAction mode) - throws AzureBlobFileSystemException { + public void access(final Path path, final FsAction mode, + TracingContext tracingContext) throws AzureBlobFileSystemException { LOG.debug("access for filesystem: {}, path: {}, mode: {}", this.client.getFileSystem(), path, mode); if (!this.abfsConfiguration.isCheckAccessEnabled() - || !getIsNamespaceEnabled()) { + || !getIsNamespaceEnabled(tracingContext)) { LOG.debug("Returning; either check access is not enabled or the account" + " used is not namespace enabled"); return; } try (AbfsPerfInfo perfInfo = startTracking("access", "checkAccess")) { final AbfsRestOperation op = this.client - .checkAccess(getRelativePath(path), mode.SYMBOL); + .checkAccess(getRelativePath(path), mode.SYMBOL, tracingContext); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } @@ -1479,7 +1524,7 @@ private void initializeClient(URI uri, String fileSystemName, private AbfsClientContext populateAbfsClientContext() { return new AbfsClientContextBuilder() .withExponentialRetryPolicy( - new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries())) + new ExponentialRetryPolicy(abfsConfiguration)) .withAbfsCounters(abfsCounters) .withAbfsPerfTracker(abfsPerfTracker) .build(); @@ -1699,13 +1744,13 @@ private void updateInfiniteLeaseDirs() { this.azureInfiniteLeaseDirSet.remove(""); } - private AbfsLease maybeCreateLease(String relativePath) + private AbfsLease maybeCreateLease(String relativePath, TracingContext tracingContext) throws AzureBlobFileSystemException { boolean enableInfiniteLease = isInfiniteLeaseKey(relativePath); if (!enableInfiniteLease) { return null; } - AbfsLease lease = new AbfsLease(client, relativePath); + AbfsLease lease = new AbfsLease(client, relativePath, tracingContext); leaseRefs.put(lease, null); return lease; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 276203b042cfd..4a2c5951bd53d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -117,6 +117,12 @@ public final class ConfigurationKeys { * Default value of this config is true. **/ public static final String FS_AZURE_DISABLE_OUTPUTSTREAM_FLUSH = "fs.azure.disable.outputstream.flush"; public static final String FS_AZURE_USER_AGENT_PREFIX_KEY = "fs.azure.user.agent.prefix"; + /** + * The client correlation ID provided over config that will be added to + * x-ms-client-request-Id header. 
Defaults to empty string if the length and + * character constraints are not satisfied. **/ + public static final String FS_AZURE_CLIENT_CORRELATIONID = "fs.azure.client.correlationid"; + public static final String FS_AZURE_TRACINGHEADER_FORMAT = "fs.azure.tracingheader.format"; public static final String FS_AZURE_CLUSTER_NAME = "fs.azure.cluster.name"; public static final String FS_AZURE_CLUSTER_TYPE = "fs.azure.cluster.type"; public static final String FS_AZURE_SSL_CHANNEL_MODE_KEY = "fs.azure.ssl.channel.mode"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FSOperationType.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FSOperationType.java new file mode 100644 index 0000000000000..6b6e98c9c7082 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FSOperationType.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.constants; + +public enum FSOperationType { + ACCESS("AS"), + APPEND("AP"), + BREAK_LEASE("BL"), + CREATE("CR"), + CREATE_FILESYSTEM("CF"), + CREATE_NON_RECURSIVE("CN"), + DELETE("DL"), + GET_ACL_STATUS("GA"), + GET_ATTR("GR"), + GET_FILESTATUS("GF"), + LISTSTATUS("LS"), + MKDIR("MK"), + MODIFY_ACL("MA"), + OPEN("OP"), + HAS_PATH_CAPABILITY("PC"), + SET_PERMISSION("SP"), + READ("RE"), + RELEASE_LEASE("RL"), + REMOVE_ACL("RA"), + REMOVE_ACL_ENTRIES("RT"), + REMOVE_DEFAULT_ACL("RD"), + RENAME("RN"), + SET_ATTR("SR"), + SET_OWNER("SO"), + SET_ACL("SA"), + TEST_OP("TS"), + WRITE("WR"); + + private final String opCode; + + FSOperationType(String opCode) { + this.opCode = opCode; + } + + @Override + public String toString() { + return opCode; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index 6ef3ca40f36e1..a1de9dfc0aca9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -112,6 +112,7 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_DELETE_CONSIDERED_IDEMPOTENT = true; public static final int DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS = 5 * 60 * 1000; // 5 mins + public static final int STREAM_ID_LEN = 12; public static final boolean DEFAULT_ENABLE_ABFS_LIST_ITERATOR = true; private FileSystemConfigurations() {} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index c5c218d3fb257..27206959ba533 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -63,6 +63,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; import org.apache.hadoop.util.concurrent.HadoopExecutors; @@ -237,7 +238,7 @@ AbfsUriQueryBuilder createDefaultUriQueryBuilder() { return abfsUriQueryBuilder; } - public AbfsRestOperation createFilesystem() throws AzureBlobFileSystemException { + public AbfsRestOperation createFilesystem(TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = new AbfsUriQueryBuilder(); @@ -250,11 +251,11 @@ public AbfsRestOperation createFilesystem() throws AzureBlobFileSystemException HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation setFilesystemProperties(final String properties) throws AzureBlobFileSystemException { + public AbfsRestOperation setFilesystemProperties(final String properties, TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. 
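Aside, for orientation: every AbfsClient method in the hunks that follow now takes a TracingContext and forwards it to op.execute(). The sketch below (not part of the patch) shows the intended calling pattern, based on the TracingContext constructor added later in this diff. The helper name, the `client` parameter, and the literal correlation ID are illustrative assumptions; in the driver itself, AzureBlobFileSystem builds the context from the clientCorrelationId, fileSystemId, and tracingHeaderFormat fields set up in initialize().

    // Sketch only, as if placed alongside AbfsClient in the services package.
    import java.util.UUID;
    import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
    import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
    import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
    import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat;

    static AbfsRestOperation createFilesystemWithTracing(AbfsClient client,
        String clientCorrelationId) throws AzureBlobFileSystemException {
      // One TracingContext per Hadoop-level operation, reused across retries
      // of the same request so every attempt carries the same ID set.
      TracingContext tracingContext = new TracingContext(
          clientCorrelationId,                // validated fs.azure.client.correlationid value
          UUID.randomUUID().toString(),       // per-AzureBlobFileSystem instance GUID
          FSOperationType.CREATE_FILESYSTEM,  // two-letter op code ("CF")
          TracingHeaderFormat.ALL_ID_FORMAT,  // default of fs.azure.tracingheader.format
          null);                              // Listener stays null outside tests
      // execute() (see the AbfsRestOperation hunks below) updates the retry
      // count on the context and stamps the concatenated IDs into the
      // x-ms-client-request-id header of each HTTP attempt.
      return client.createFilesystem(tracingContext);
    }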
@@ -274,12 +275,13 @@ public AbfsRestOperation setFilesystemProperties(final String properties) throws HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } public AbfsRestOperation listPath(final String relativePath, final boolean recursive, final int listMaxResults, - final String continuation) throws AzureBlobFileSystemException { + final String continuation, TracingContext tracingContext) + throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); @@ -298,11 +300,11 @@ public AbfsRestOperation listPath(final String relativePath, final boolean recur HTTP_METHOD_GET, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation getFilesystemProperties() throws AzureBlobFileSystemException { + public AbfsRestOperation getFilesystemProperties(TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); @@ -315,11 +317,11 @@ public AbfsRestOperation getFilesystemProperties() throws AzureBlobFileSystemExc HTTP_METHOD_HEAD, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation deleteFilesystem() throws AzureBlobFileSystemException { + public AbfsRestOperation deleteFilesystem(TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); @@ -332,13 +334,14 @@ public AbfsRestOperation deleteFilesystem() throws AzureBlobFileSystemException HTTP_METHOD_DELETE, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } public AbfsRestOperation createPath(final String path, final boolean isFile, final boolean overwrite, final String permission, final String umask, - final boolean isAppendBlob, final String eTag) throws AzureBlobFileSystemException { + final boolean isAppendBlob, final String eTag, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); if (isFile) { addCustomerProvidedKeyHeaders(requestHeaders); @@ -378,7 +381,7 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin url, requestHeaders); try { - op.execute(); + op.execute(tracingContext); } catch (AzureBlobFileSystemException ex) { if (!isFile && op.getResult().getStatusCode() == HttpURLConnection.HTTP_CONFLICT) { String existingResource = @@ -392,7 +395,7 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin return op; } - public AbfsRestOperation acquireLease(final String path, int duration) throws AzureBlobFileSystemException { + public AbfsRestOperation acquireLease(final String path, int duration, TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, ACQUIRE_LEASE_ACTION)); @@ -408,11 +411,12 @@ public AbfsRestOperation acquireLease(final String path, int duration) throws Az HTTP_METHOD_POST, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation renewLease(final String path, final String leaseId) throws AzureBlobFileSystemException { + public AbfsRestOperation renewLease(final 
String path, final String leaseId, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, RENEW_LEASE_ACTION)); @@ -427,11 +431,12 @@ public AbfsRestOperation renewLease(final String path, final String leaseId) thr HTTP_METHOD_POST, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation releaseLease(final String path, final String leaseId) throws AzureBlobFileSystemException { + public AbfsRestOperation releaseLease(final String path, + final String leaseId, TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, RELEASE_LEASE_ACTION)); @@ -446,11 +451,12 @@ public AbfsRestOperation releaseLease(final String path, final String leaseId) t HTTP_METHOD_POST, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation breakLease(final String path) throws AzureBlobFileSystemException { + public AbfsRestOperation breakLease(final String path, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, BREAK_LEASE_ACTION)); @@ -465,12 +471,13 @@ public AbfsRestOperation breakLease(final String path) throws AzureBlobFileSyste HTTP_METHOD_POST, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation renamePath(String source, final String destination, final String continuation) - throws AzureBlobFileSystemException { + public AbfsRestOperation renamePath(String source, final String destination, + final String continuation, TracingContext tracingContext) + throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); String encodedRenameSource = urlEncode(FORWARD_SLASH + this.getFileSystem() + source); @@ -497,10 +504,10 @@ public AbfsRestOperation renamePath(String source, final String destination, fin requestHeaders); Instant renameRequestStartTime = Instant.now(); try { - op.execute(); + op.execute(tracingContext); } catch (AzureBlobFileSystemException e) { final AbfsRestOperation idempotencyOp = renameIdempotencyCheckOp( - renameRequestStartTime, op, destination); + renameRequestStartTime, op, destination, tracingContext); if (idempotencyOp.getResult().getStatusCode() == op.getResult().getStatusCode()) { // idempotency did not return different result @@ -525,13 +532,15 @@ public AbfsRestOperation renamePath(String source, final String destination, fin * @param renameRequestStartTime startTime for the rename request * @param op Rename request REST operation response * @param destination rename destination path + * @param tracingContext Tracks identifiers for request header * @return REST operation response post idempotency check * @throws AzureBlobFileSystemException if GetFileStatus hits any exception */ public AbfsRestOperation renameIdempotencyCheckOp( final Instant renameRequestStartTime, final AbfsRestOperation op, - final String destination) throws AzureBlobFileSystemException { + final String destination, + TracingContext tracingContext) throws AzureBlobFileSystemException { if ((op.isARetriedRequest()) && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND)) { // Server has returned HTTP 404, which means rename source no longer @@ 
-540,7 +549,7 @@ public AbfsRestOperation renameIdempotencyCheckOp( try { final AbfsRestOperation destStatusOp = getPathStatus(destination, - false); + false, tracingContext); if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) { String lmt = destStatusOp.getResult().getResponseHeader( @@ -561,7 +570,7 @@ public AbfsRestOperation renameIdempotencyCheckOp( } public AbfsRestOperation append(final String path, final byte[] buffer, - AppendRequestParameters reqParams, final String cachedSasToken) + AppendRequestParameters reqParams, final String cachedSasToken, TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); addCustomerProvidedKeyHeaders(requestHeaders); @@ -601,11 +610,11 @@ public AbfsRestOperation append(final String path, final byte[] buffer, reqParams.getLength(), sasTokenForReuse); try { - op.execute(); + op.execute(tracingContext); } catch (AzureBlobFileSystemException e) { if (reqParams.isAppendBlob() && appendSuccessCheckOp(op, path, - (reqParams.getPosition() + reqParams.getLength()))) { + (reqParams.getPosition() + reqParams.getLength()), tracingContext)) { final AbfsRestOperation successOp = new AbfsRestOperation( AbfsRestOperationType.Append, this, @@ -631,10 +640,10 @@ && appendSuccessCheckOp(op, path, // Hence, we pass/succeed the appendblob append call // in case we are doing a retry after checking the length of the file public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, - final long length) throws AzureBlobFileSystemException { + final long length, TracingContext tracingContext) throws AzureBlobFileSystemException { if ((op.isARetriedRequest()) && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_BAD_REQUEST)) { - final AbfsRestOperation destStatusOp = getPathStatus(path, false); + final AbfsRestOperation destStatusOp = getPathStatus(path, false, tracingContext); if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) { String fileLength = destStatusOp.getResult().getResponseHeader( HttpHeaderConfigurations.CONTENT_LENGTH); @@ -647,9 +656,10 @@ public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, return false; } - public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData, - boolean isClose, final String cachedSasToken, final String leaseId) - throws AzureBlobFileSystemException { + public AbfsRestOperation flush(final String path, final long position, + boolean retainUncommittedData, boolean isClose, + final String cachedSasToken, final String leaseId, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); addCustomerProvidedKeyHeaders(requestHeaders); // JDK7 does not support PATCH, so to workaround the issue we will use @@ -676,11 +686,12 @@ public AbfsRestOperation flush(final String path, final long position, boolean r HTTP_METHOD_PUT, url, requestHeaders, sasTokenForReuse); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation setPathProperties(final String path, final String properties) + public AbfsRestOperation setPathProperties(final String path, final String properties, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); addCustomerProvidedKeyHeaders(requestHeaders); @@ -702,11 +713,12 @@ public AbfsRestOperation setPathProperties(final String path, final String prope HTTP_METHOD_PUT, 
url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties) throws AzureBlobFileSystemException { + public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); @@ -730,12 +742,13 @@ public AbfsRestOperation getPathStatus(final String path, final boolean includeP HTTP_METHOD_HEAD, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } public AbfsRestOperation read(final String path, final long position, final byte[] buffer, final int bufferOffset, - final int bufferLength, final String eTag, String cachedSasToken) throws AzureBlobFileSystemException { + final int bufferLength, final String eTag, String cachedSasToken, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); addCustomerProvidedKeyHeaders(requestHeaders); requestHeaders.add(new AbfsHttpHeader(RANGE, @@ -748,7 +761,6 @@ public AbfsRestOperation read(final String path, final long position, final byte abfsUriQueryBuilder, cachedSasToken); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); - final AbfsRestOperation op = new AbfsRestOperation( AbfsRestOperationType.ReadFile, this, @@ -758,12 +770,13 @@ public AbfsRestOperation read(final String path, final long position, final byte buffer, bufferOffset, bufferLength, sasTokenForReuse); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation deletePath(final String path, final boolean recursive, final String continuation) + public AbfsRestOperation deletePath(final String path, final boolean recursive, final String continuation, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); @@ -781,7 +794,7 @@ public AbfsRestOperation deletePath(final String path, final boolean recursive, url, requestHeaders); try { - op.execute(); + op.execute(tracingContext); } catch (AzureBlobFileSystemException e) { final AbfsRestOperation idempotencyOp = deleteIdempotencyCheckOp(op); if (idempotencyOp.getResult().getStatusCode() @@ -832,7 +845,8 @@ public AbfsRestOperation deleteIdempotencyCheckOp(final AbfsRestOperation op) { return op; } - public AbfsRestOperation setOwner(final String path, final String owner, final String group) + public AbfsRestOperation setOwner(final String path, final String owner, final String group, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use @@ -858,11 +872,12 @@ public AbfsRestOperation setOwner(final String path, final String owner, final S AbfsHttpConstants.HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation setPermission(final String path, final String permission) + public AbfsRestOperation setPermission(final String path, final String permission, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use @@ -883,15 +898,17 @@ public AbfsRestOperation setPermission(final 
String path, final String permissio AbfsHttpConstants.HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation setAcl(final String path, final String aclSpecString) throws AzureBlobFileSystemException { - return setAcl(path, aclSpecString, AbfsHttpConstants.EMPTY_STRING); + public AbfsRestOperation setAcl(final String path, final String aclSpecString, + TracingContext tracingContext) throws AzureBlobFileSystemException { + return setAcl(path, aclSpecString, AbfsHttpConstants.EMPTY_STRING, tracingContext); } - public AbfsRestOperation setAcl(final String path, final String aclSpecString, final String eTag) + public AbfsRestOperation setAcl(final String path, final String aclSpecString, final String eTag, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use @@ -916,15 +933,17 @@ public AbfsRestOperation setAcl(final String path, final String aclSpecString, f AbfsHttpConstants.HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } - public AbfsRestOperation getAclStatus(final String path) throws AzureBlobFileSystemException { - return getAclStatus(path, abfsConfiguration.isUpnUsed()); + public AbfsRestOperation getAclStatus(final String path, TracingContext tracingContext) + throws AzureBlobFileSystemException { + return getAclStatus(path, abfsConfiguration.isUpnUsed(), tracingContext); } - public AbfsRestOperation getAclStatus(final String path, final boolean useUPN) throws AzureBlobFileSystemException { + public AbfsRestOperation getAclStatus(final String path, final boolean useUPN, + TracingContext tracingContext) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); @@ -939,7 +958,7 @@ public AbfsRestOperation getAclStatus(final String path, final boolean useUPN) t AbfsHttpConstants.HTTP_METHOD_HEAD, url, requestHeaders); - op.execute(); + op.execute(tracingContext); return op; } @@ -949,10 +968,11 @@ public AbfsRestOperation getAclStatus(final String path, final boolean useUPN) t * * @param path Path for which access check needs to be performed * @param rwx The permission to be checked on the path + * @param tracingContext Tracks identifiers for request header * @return The {@link AbfsRestOperation} object for the operation * @throws AzureBlobFileSystemException in case of bad requests */ - public AbfsRestOperation checkAccess(String path, String rwx) + public AbfsRestOperation checkAccess(String path, String rwx, TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, CHECK_ACCESS); @@ -962,7 +982,7 @@ public AbfsRestOperation checkAccess(String path, String rwx) AbfsRestOperation op = new AbfsRestOperation( AbfsRestOperationType.CheckAccess, this, AbfsHttpConstants.HTTP_METHOD_HEAD, url, createDefaultHeaders()); - op.execute(); + op.execute(tracingContext); return op; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 720b99b9f8390..5d71c9eee7941 100644 --- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -26,7 +26,6 @@ import java.net.URL; import java.net.URLEncoder; import java.util.List; -import java.util.UUID; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; @@ -71,7 +70,6 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private String statusDescription; private String storageErrorCode = ""; private String storageErrorMessage = ""; - private String clientRequestId = ""; private String requestId = ""; private String expectedAppendPos = ""; private ListResultSchema listResultSchema = null; @@ -139,7 +137,8 @@ public String getStorageErrorMessage() { } public String getClientRequestId() { - return clientRequestId; + return this.connection + .getRequestProperty(HttpHeaderConfigurations.X_MS_CLIENT_REQUEST_ID); } public String getExpectedAppendPos() { @@ -176,7 +175,7 @@ public String toString() { sb.append(","); sb.append(expectedAppendPos); sb.append(",cid="); - sb.append(clientRequestId); + sb.append(getClientRequestId()); sb.append(",rid="); sb.append(requestId); if (isTraceEnabled) { @@ -207,7 +206,7 @@ public String getLogString() { .append(" e=") .append(storageErrorCode) .append(" ci=") - .append(clientRequestId) + .append(getClientRequestId()) .append(" ri=") .append(requestId); @@ -246,7 +245,6 @@ public AbfsHttpOperation(final URL url, final String method, final List 0 && nextOffset < contentLength) { LOG.debug("issuing read ahead requestedOffset = {} requested size {}", nextOffset, nextSize); - ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); + ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize, + new TracingContext(readAheadTracingContext)); nextOffset = nextOffset + nextSize; numReadAheads--; // From next round onwards should be of readahead block size. @@ -486,15 +507,15 @@ private int readInternal(final long position, final byte[] b, final int offset, } // got nothing from read-ahead, do our own read now - receivedBytes = readRemote(position, b, offset, length); + receivedBytes = readRemote(position, b, offset, length, new TracingContext(tracingContext)); return receivedBytes; } else { LOG.debug("read ahead disabled, reading remote"); - return readRemote(position, b, offset, length); + return readRemote(position, b, offset, length, new TracingContext(tracingContext)); } } - int readRemote(long position, byte[] b, int offset, int length) throws IOException { + int readRemote(long position, byte[] b, int offset, int length, TracingContext tracingContext) throws IOException { if (position < 0) { throw new IllegalArgumentException("attempting to read from negative offset"); } @@ -521,7 +542,7 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti } LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length); op = client.read(path, position, b, offset, length, - tolerateOobAppends ? "*" : eTag, cachedSasToken.get()); + tolerateOobAppends ? "*" : eTag, cachedSasToken.get(), tracingContext); cachedSasToken.update(op.getSasToken()); LOG.debug("issuing HTTP GET request params position = {} b.length = {} " + "offset = {} length = {}", position, b.length, offset, length); @@ -656,6 +677,10 @@ public synchronized long getPos() throws IOException { return nextReadPos < 0 ? 
0 : nextReadPos; } + public TracingContext getTracingContext() { + return tracingContext; + } + /** * Seeks a different copy of the data. Returns true if * found a new source, false otherwise. @@ -729,6 +754,11 @@ protected void setCachedSasToken(final CachedSASToken cachedSasToken) { this.cachedSasToken = cachedSasToken; } + @VisibleForTesting + public String getStreamID() { + return inputStreamId; + } + /** * Getter for AbfsInputStreamStatistics. * @@ -739,6 +769,12 @@ public AbfsInputStreamStatistics getStreamStatistics() { return streamStatistics; } + @VisibleForTesting + public void registerListener(Listener listener1) { + listener = listener1; + tracingContext.setListener(listener); + } + /** * Getter for bytes read from readAhead buffer that fills asynchronously. * diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java index 97a8b0228a5b3..2e97598ef04f3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java @@ -30,7 +30,9 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; @@ -60,6 +62,7 @@ public final class AbfsLease { private final AbfsClient client; private final String path; + private final TracingContext tracingContext; // Lease status variables private volatile boolean leaseFreed; @@ -78,16 +81,18 @@ public LeaseException(String s) { } } - public AbfsLease(AbfsClient client, String path) throws AzureBlobFileSystemException { - this(client, path, DEFAULT_LEASE_ACQUIRE_MAX_RETRIES, DEFAULT_LEASE_ACQUIRE_RETRY_INTERVAL); + public AbfsLease(AbfsClient client, String path, TracingContext tracingContext) throws AzureBlobFileSystemException { + this(client, path, DEFAULT_LEASE_ACQUIRE_MAX_RETRIES, + DEFAULT_LEASE_ACQUIRE_RETRY_INTERVAL, tracingContext); } @VisibleForTesting public AbfsLease(AbfsClient client, String path, int acquireMaxRetries, - int acquireRetryInterval) throws AzureBlobFileSystemException { + int acquireRetryInterval, TracingContext tracingContext) throws AzureBlobFileSystemException { this.leaseFreed = false; this.client = client; this.path = path; + this.tracingContext = tracingContext; if (client.getNumLeaseThreads() < 1) { throw new LeaseException(ERR_NO_LEASE_THREADS); @@ -96,7 +101,8 @@ public AbfsLease(AbfsClient client, String path, int acquireMaxRetries, // Try to get the lease a specified number of times, else throw an error RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( acquireMaxRetries, acquireRetryInterval, TimeUnit.SECONDS); - acquireLease(retryPolicy, 0, acquireRetryInterval, 0); + acquireLease(retryPolicy, 0, acquireRetryInterval, 0, + new TracingContext(tracingContext)); while (leaseID == null && exception == null) { try { @@ -114,13 +120,15 @@ public AbfsLease(AbfsClient client, String path, int acquireMaxRetries, LOG.debug("Acquired lease {} on {}", leaseID, path); } - private void acquireLease(RetryPolicy retryPolicy, int numRetries, int retryInterval, long 
delay) + private void acquireLease(RetryPolicy retryPolicy, int numRetries, + int retryInterval, long delay, TracingContext tracingContext) throws LeaseException { LOG.debug("Attempting to acquire lease on {}, retry {}", path, numRetries); if (future != null && !future.isDone()) { throw new LeaseException(ERR_LEASE_FUTURE_EXISTS); } - future = client.schedule(() -> client.acquireLease(path, INFINITE_LEASE_DURATION), + future = client.schedule(() -> client.acquireLease(path, + INFINITE_LEASE_DURATION, tracingContext), delay, TimeUnit.SECONDS); client.addCallback(future, new FutureCallback() { @Override @@ -136,7 +144,8 @@ public void onFailure(Throwable throwable) { == retryPolicy.shouldRetry(null, numRetries, 0, true).action) { LOG.debug("Failed to acquire lease on {}, retrying: {}", path, throwable); acquireRetryCount++; - acquireLease(retryPolicy, numRetries + 1, retryInterval, retryInterval); + acquireLease(retryPolicy, numRetries + 1, retryInterval, + retryInterval, tracingContext); } else { exception = throwable; } @@ -161,7 +170,9 @@ public void free() { if (future != null && !future.isDone()) { future.cancel(true); } - client.releaseLease(path, leaseID); + TracingContext tracingContext = new TracingContext(this.tracingContext); + tracingContext.setOperation(FSOperationType.RELEASE_LEASE); + client.releaseLease(path, leaseID, tracingContext); } catch (IOException e) { LOG.warn("Exception when trying to release lease {} on {}. Lease will need to be broken: {}", leaseID, path, e.getMessage()); @@ -185,4 +196,9 @@ public String getLeaseID() { public int getAcquireRetryCount() { return acquireRetryCount; } + + @VisibleForTesting + public TracingContext getTracingContext() { + return tracingContext; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java index 0c664fc2fbbc4..835217f945af5 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; public class AbfsListStatusRemoteIterator implements RemoteIterator { @@ -48,6 +49,7 @@ public class AbfsListStatusRemoteIterator private final FileStatus fileStatus; private final ListingSupport listingSupport; private final ArrayBlockingQueue iteratorsQueue; + private final TracingContext tracingContext; private volatile boolean isAsyncInProgress = false; private boolean isIterationComplete = false; @@ -55,9 +57,10 @@ public class AbfsListStatusRemoteIterator private Iterator currIterator; public AbfsListStatusRemoteIterator(final FileStatus fileStatus, - final ListingSupport listingSupport) { + final ListingSupport listingSupport, TracingContext tracingContext) { this.fileStatus = fileStatus; this.listingSupport = listingSupport; + this.tracingContext = tracingContext; iteratorsQueue = new ArrayBlockingQueue<>(MAX_QUEUE_SIZE); currIterator = Collections.emptyIterator(); fetchBatchesAsync(); @@ -145,7 +148,7 @@ private void addNextBatchIteratorToQueue() List fileStatuses = new ArrayList<>(); continuation = listingSupport .listStatus(fileStatus.getPath(), null, fileStatuses, FETCH_ALL_FALSE, - continuation); + 
continuation, tracingContext); if (!fileStatuses.isEmpty()) { iteratorsQueue.put(fileStatuses.iterator()); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 334f7d63e0607..91b068a78c93f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -30,6 +30,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.UUID; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -37,11 +38,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken; +import org.apache.hadoop.fs.azurebfs.utils.Listener; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.fs.statistics.DurationTracker; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsSource; @@ -51,6 +56,7 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.STREAM_ID_LEN; import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_WRITE_WITHOUT_LEASE; import static org.apache.hadoop.fs.impl.StoreImplementationUtils.isProbeForSyncable; import static org.apache.hadoop.io.IOUtils.wrapException; @@ -90,6 +96,9 @@ public class AbfsOutputStream extends OutputStream implements Syncable, // SAS tokens can be re-used until they expire private CachedSASToken cachedSasToken; + private final String outputStreamId; + private final TracingContext tracingContext; + private Listener listener; private AbfsLease lease; private String leaseId; @@ -115,7 +124,8 @@ public AbfsOutputStream( final Statistics statistics, final String path, final long position, - AbfsOutputStreamContext abfsOutputStreamContext) { + AbfsOutputStreamContext abfsOutputStreamContext, + TracingContext tracingContext) { this.client = client; this.statistics = statistics; this.path = path; @@ -160,6 +170,14 @@ public AbfsOutputStream( if (outputStreamStatistics != null) { this.ioStatistics = outputStreamStatistics.getIOStatistics(); } + this.outputStreamId = createOutputStreamId(); + this.tracingContext = new TracingContext(tracingContext); + this.tracingContext.setStreamID(outputStreamId); + this.tracingContext.setOperation(FSOperationType.WRITE); + } + + private String createOutputStreamId() { + return StringUtils.right(UUID.randomUUID().toString(), STREAM_ID_LEN); } /** @@ -292,6 +310,15 @@ public void hflush() throws IOException { } } + public String getStreamID() { + return outputStreamId; + } + + public void registerListener(Listener listener1) { + listener = listener1; + 
tracingContext.setListener(listener); + } + /** * Force all data in the output stream to be written to Azure storage. * Wait to return until this is complete. Close the access to the stream and @@ -385,7 +412,9 @@ private void writeAppendBlobCurrentBufferToService() throws IOException { "writeCurrentBufferToService", "append")) { AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0, bytesLength, APPEND_MODE, true, leaseId); - AbfsRestOperation op = client.append(path, bytes, reqParams, cachedSasToken.get()); + AbfsRestOperation op = client + .append(path, bytes, reqParams, cachedSasToken.get(), + new TracingContext(tracingContext)); cachedSasToken.update(op.getSasToken()); if (outputStreamStatistics != null) { outputStreamStatistics.uploadSuccessful(bytesLength); @@ -444,10 +473,8 @@ private synchronized void writeCurrentBufferToService(boolean isFlush, boolean i waitForTaskToComplete(); } } - final Future job = - completionService.submit(() -> { - AbfsPerfTracker tracker = - client.getAbfsPerfTracker(); + final Future job = completionService.submit(() -> { + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "writeCurrentBufferToService", "append")) { AppendRequestParameters.Mode @@ -460,7 +487,7 @@ private synchronized void writeCurrentBufferToService(boolean isFlush, boolean i AppendRequestParameters reqParams = new AppendRequestParameters( offset, 0, bytesLength, mode, false, leaseId); AbfsRestOperation op = client.append(path, bytes, reqParams, - cachedSasToken.get()); + cachedSasToken.get(), new TracingContext(tracingContext)); cachedSasToken.update(op.getSasToken()); perfInfo.registerResult(op.getResult()); byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); @@ -527,7 +554,7 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset, try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "flushWrittenBytesToServiceInternal", "flush")) { AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose, - cachedSasToken.get(), leaseId); + cachedSasToken.get(), leaseId, new TracingContext(tracingContext)); cachedSasToken.update(op.getSasToken()); perfInfo.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 4c24c37a0dfb0..62576d8b371b0 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidAbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; /** @@ -171,14 +172,15 @@ String getSasToken() { /** * Execute a AbfsRestOperation. Track the Duration of a request if * abfsCounters isn't null. 
- * + * @param tracingContext TracingContext instance to track correlation IDs */ - public void execute() throws AzureBlobFileSystemException { + public void execute(TracingContext tracingContext) + throws AzureBlobFileSystemException { try { IOStatisticsBinding.trackDurationOfInvocation(abfsCounters, AbfsStatistic.getStatNameFromHttpCall(method), - () -> completeExecute()); + () -> completeExecute(tracingContext)); } catch (AzureBlobFileSystemException aze) { throw aze; } catch (IOException e) { @@ -190,8 +192,10 @@ public void execute() throws AzureBlobFileSystemException { /** * Executes the REST operation with retry, by issuing one or more * HTTP operations. + * @param tracingContext TracingContext instance to track correlation IDs */ - private void completeExecute() throws AzureBlobFileSystemException { + private void completeExecute(TracingContext tracingContext) + throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); if (latencyHeader != null && !latencyHeader.isEmpty()) { @@ -202,9 +206,10 @@ private void completeExecute() throws AzureBlobFileSystemException { retryCount = 0; LOG.debug("First execution of REST operation - {}", operationType); - while (!executeHttpOperation(retryCount)) { + while (!executeHttpOperation(retryCount, tracingContext)) { try { ++retryCount; + tracingContext.setRetryCount(retryCount); LOG.debug("Retrying REST operation {}. RetryCount = {}", operationType, retryCount); Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount)); @@ -226,12 +231,14 @@ private void completeExecute() throws AzureBlobFileSystemException { * fails, there may be a retry. The retryCount is incremented with each * attempt. */ - private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileSystemException { + private boolean executeHttpOperation(final int retryCount, + TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsHttpOperation httpOperation = null; try { // initialize the HTTP request and open the connection httpOperation = new AbfsHttpOperation(url, method, requestHeaders); incrementCounter(AbfsStatistic.CONNECTIONS_MADE, 1); + tracingContext.constructHeader(httpOperation); switch(client.getAuthType()) { case Custom: diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java index 9a75c78aa0612..89d99471a8214 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java @@ -21,6 +21,7 @@ import java.util.Random; import java.net.HttpURLConnection; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -89,6 +90,16 @@ public ExponentialRetryPolicy(final int maxIoRetries) { DEFAULT_CLIENT_BACKOFF); } + /** + * Initializes a new instance of the {@link ExponentialRetryPolicy} class. + * + * @param conf The {@link AbfsConfiguration} from which to retrieve retry configuration. 
+ */ + public ExponentialRetryPolicy(AbfsConfiguration conf) { + this(conf.getMaxIoRetries(), conf.getMinBackoffIntervalMilliseconds(), conf.getMaxBackoffIntervalMilliseconds(), + conf.getBackoffIntervalMilliseconds()); + } + /** * Initializes a new instance of the {@link ExponentialRetryPolicy} class. * diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java index 4c449409aafde..dc070a1d405d8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; @InterfaceAudience.Private @InterfaceStability.Unstable @@ -32,10 +33,11 @@ public interface ListingSupport { /** * @param path The list path. + * @param tracingContext TracingContext instance to track identifiers * @return the entries in the path. * @throws IOException in case of error */ - FileStatus[] listStatus(Path path) throws IOException; + FileStatus[] listStatus(Path path, TracingContext tracingContext) throws IOException; /** * @param path Path the list path. @@ -49,10 +51,11 @@ public interface ListingSupport { * non-existent entry in lexical order: listStatus * (Path("/folder"), "cfile") will return * "/folder/hfile" and "/folder/ifile". + * @param tracingContext TracingContext instance to track identifiers * @return the entries in the path start from "startFrom" in lexical order. * @throws IOException in case of error */ - FileStatus[] listStatus(Path path, String startFrom) throws IOException; + FileStatus[] listStatus(Path path, String startFrom, TracingContext tracingContext) throws IOException; /** * @param path The list path @@ -71,9 +74,10 @@ public interface ListingSupport { * filled with just one page of results or the entire * result. * @param continuation Continuation token. null means start from the beginning.
+ * @param tracingContext TracingContext instance to track identifiers * @return Continuation token * @throws IOException in case of error */ String listStatus(Path path, String startFrom, List fileStatuses, - boolean fetchAll, String continuation) throws IOException; + boolean fetchAll, String continuation, TracingContext tracingContext) throws IOException; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java index 5d55726222de7..9ce926d841c84 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java @@ -22,6 +22,7 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus.READ_FAILED; @@ -36,6 +37,7 @@ class ReadBuffer { private ReadBufferStatus status; // status of the buffer private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client // waiting on this buffer gets unblocked + private TracingContext tracingContext; // fields to help with eviction logic private long timeStamp = 0; // tick at which buffer became available to read @@ -53,6 +55,14 @@ public void setStream(AbfsInputStream stream) { this.stream = stream; } + public void setTracingContext(TracingContext tracingContext) { + this.tracingContext = tracingContext; + } + + public TracingContext getTracingContext() { + return tracingContext; + } + public long getOffset() { return offset; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java index f330d790eb843..e7656c14493cb 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java @@ -30,6 +30,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -115,7 +116,8 @@ private ReadBufferManager() { * @param requestedOffset The offset in the file which should be read * @param requestedLength The length to read */ - void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, final int requestedLength) { + void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, final int requestedLength, + TracingContext tracingContext) { if (LOGGER.isTraceEnabled()) { LOGGER.trace("Start Queueing readAhead for {} offset {} length {}", stream.getPath(), requestedOffset, requestedLength); @@ -136,6 +138,7 @@ void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, fi buffer.setRequestedLength(requestedLength); buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE); buffer.setLatch(new CountDownLatch(1)); + buffer.setTracingContext(tracingContext); Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already @@ -304,6 +307,7 @@ private boolean evict(final ReadBuffer
buf) { } completedReadList.remove(buf); + buf.setTracingContext(null); if (LOGGER.isTraceEnabled()) { LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}", buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength()); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java index 41acd7e06f132..21c9d1be76657 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java @@ -69,7 +69,8 @@ public void run() { // If AbfsInputStream was created with bigger buffer size than // read-ahead buffer size, make sure a valid length is passed // for remote read - Math.min(buffer.getRequestedLength(), buffer.getBuffer().length)); + Math.min(buffer.getRequestedLength(), buffer.getBuffer().length), + buffer.getTracingContext()); bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager } catch (Exception ex) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/Listener.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/Listener.java new file mode 100644 index 0000000000000..4c2270a87f100 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/Listener.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; + +/** + * Interface for testing identifiers tracked via TracingContext + * Implemented in TracingHeaderValidator + */ + +public interface Listener { + void callTracingHeaderValidator(String header, TracingHeaderFormat format); + void updatePrimaryRequestID(String primaryRequestID); + Listener getClone(); + void setOperation(FSOperationType operation); +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java new file mode 100644 index 0000000000000..5a115451df159 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.util.UUID; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; + +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; + +/** + * The TracingContext class to correlate Store requests using unique + * identifiers and resources common to requests (e.g. filesystem, stream). + * + * Implementing new HDFS method: + * Create TracingContext instance in method of outer layer of + * ABFS driver (AzureBlobFileSystem/AbfsInputStream/AbfsOutputStream), to be + * passed through ABFS layers up to AbfsRestOperation. + * + * Add new operations to the FSOperationType enum. + * + * PrimaryRequestId can be enabled for individual Hadoop APIs that invoke + * multiple Store calls. + * + * Testing: + * Pass an instance of TracingHeaderValidator to registerListener() of ABFS + * filesystem/stream class before calling the API in tests. + */ + +public class TracingContext { + private final String clientCorrelationID; // passed over config by client + private final String fileSystemID; // GUID for fileSystem instance + private String clientRequestId = EMPTY_STRING; // GUID per http request + //Optional, non-empty for methods that trigger two or more Store calls + private String primaryRequestId; + private String streamID; // appears per stream instance (read/write ops) + private int retryCount; // retry number as recorded by AbfsRestOperation + private FSOperationType opType; // two-lettered code representing Hadoop op + private final TracingHeaderFormat format; // header ID display options + private Listener listener = null; // null except when testing + //final concatenated ID list set into x-ms-client-request-id header + private String header = EMPTY_STRING; + + private static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class); + public static final int MAX_CLIENT_CORRELATION_ID_LENGTH = 72; + public static final String CLIENT_CORRELATION_ID_PATTERN = "[a-zA-Z0-9-]*"; + + /** + * Initialize TracingContext. + * @param clientCorrelationID Provided over config by client + * @param fileSystemID Unique guid for AzureBlobFileSystem instance + * @param opType Code indicating the high-level Hadoop operation that + * triggered the current Store request + * @param tracingHeaderFormat Format of IDs to be printed in header and logs + * @param listener Holds instance of TracingHeaderValidator during testing, + * null otherwise + */ + public TracingContext(String clientCorrelationID, String fileSystemID, + FSOperationType opType, TracingHeaderFormat tracingHeaderFormat, + Listener listener) { + this.fileSystemID = fileSystemID; + this.opType = opType; + this.clientCorrelationID = clientCorrelationID; + streamID = EMPTY_STRING; + retryCount = 0; + primaryRequestId = EMPTY_STRING; + format = tracingHeaderFormat; + this.listener = listener; + } + + public TracingContext(String clientCorrelationID, String fileSystemID, +
FSOperationType opType, boolean needsPrimaryReqId, + TracingHeaderFormat tracingHeaderFormat, Listener listener) { + this(clientCorrelationID, fileSystemID, opType, tracingHeaderFormat, + listener); + primaryRequestId = needsPrimaryReqId ? UUID.randomUUID().toString() : ""; + if (listener != null) { + listener.updatePrimaryRequestID(primaryRequestId); + } + } + + public TracingContext(TracingContext originalTracingContext) { + this.fileSystemID = originalTracingContext.fileSystemID; + this.streamID = originalTracingContext.streamID; + this.clientCorrelationID = originalTracingContext.clientCorrelationID; + this.opType = originalTracingContext.opType; + this.retryCount = 0; + this.primaryRequestId = originalTracingContext.primaryRequestId; + this.format = originalTracingContext.format; + if (originalTracingContext.listener != null) { + this.listener = originalTracingContext.listener.getClone(); + } + } + + public static String validateClientCorrelationID(String clientCorrelationID) { + if ((clientCorrelationID.length() > MAX_CLIENT_CORRELATION_ID_LENGTH) + || (!clientCorrelationID.matches(CLIENT_CORRELATION_ID_PATTERN))) { + LOG.debug( + "Invalid config provided; correlation id not included in header."); + return EMPTY_STRING; + } + return clientCorrelationID; + } + + public void setPrimaryRequestID() { + primaryRequestId = UUID.randomUUID().toString(); + if (listener != null) { + listener.updatePrimaryRequestID(primaryRequestId); + } + } + + public void setStreamID(String stream) { + streamID = stream; + } + + public void setOperation(FSOperationType operation) { + this.opType = operation; + } + + public void setRetryCount(int retryCount) { + this.retryCount = retryCount; + } + + public void setListener(Listener listener) { + this.listener = listener; + } + + /** + * Concatenate all identifiers, separated by colons (:), into one string and + * set it as the X_MS_CLIENT_REQUEST_ID header of the HTTP operation. + * @param httpOperation AbfsHttpOperation instance on whose connection the + * header is set + */ + public void constructHeader(AbfsHttpOperation httpOperation) { + clientRequestId = UUID.randomUUID().toString(); + switch (format) { + case ALL_ID_FORMAT: // Optional IDs (e.g. streamId) may be empty + header = + clientCorrelationID + ":" + clientRequestId + ":" + fileSystemID + ":" + + primaryRequestId + ":" + streamID + ":" + opType + ":" + + retryCount; + break; + case TWO_ID_FORMAT: + header = clientCorrelationID + ":" + clientRequestId; + break; + default: + header = clientRequestId; //case SINGLE_ID_FORMAT + } + if (listener != null) { //for testing + listener.callTracingHeaderValidator(header, format); + } + httpOperation.setRequestProperty(HttpHeaderConfigurations.X_MS_CLIENT_REQUEST_ID, header); + } + + /** + * Return the header representing the request associated with this TracingContext. + * @return Header string set into X_MS_CLIENT_REQUEST_ID + */ + public String getHeader() { + return header; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderFormat.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderFormat.java new file mode 100644 index 0000000000000..3f23ae3ed7c14 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderFormat.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +public enum TracingHeaderFormat { + SINGLE_ID_FORMAT, // <client-request-id> + + TWO_ID_FORMAT, // <client-correlation-id>:<client-request-id> + + ALL_ID_FORMAT; // <client-correlation-id>:<client-request-id>:<filesystem-id> + // :<primary-request-id>:<stream-id>:<op-type>:<retry-count> +} diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index 8724b97ab31d4..dfb7f3f42a5cf 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -338,7 +338,7 @@ with the following configurations. retries. Default value is 5. * `fs.azure.oauth.token.fetch.retry.min.backoff.interval`: Minimum back-off interval. Added to the retry interval computed from delta backoff. By - default this si set as 0. Set the interval in milli seconds. + default this is set as 0. Set the interval in milli seconds. * `fs.azure.oauth.token.fetch.retry.max.backoff.interval`: Maximum back-off interval. Default value is 60000 (sixty seconds). Set the interval in milli seconds. @@ -729,6 +729,28 @@ Consult the javadocs for `org.apache.hadoop.fs.azurebfs.constants.ConfigurationK `org.apache.hadoop.fs.azurebfs.AbfsConfiguration` for the full list of configuration options and their default values. +### Client Correlation Options + +#### 1. Client CorrelationId Option + +Config `fs.azure.client.correlationid` provides an option to correlate client +requests using this client-provided identifier. This Id will be visible in Azure +Storage Analytics logs in the `request-id-header` field. +Reference: [Storage Analytics log format](https://docs.microsoft.com/en-us/rest/api/storageservices/storage-analytics-log-format) + +This config accepts a string which can be a maximum of 72 characters and should +contain alphanumeric characters and/or hyphens only. Defaults to an empty string +if the input is invalid. + +#### 2. Correlation IDs Display Options + +Config `fs.azure.tracingcontext.format` provides an option to select the format +of IDs included in the `request-id-header`. This config accepts a String value +corresponding to the following enum options. + `SINGLE_ID_FORMAT` : clientRequestId + `ALL_ID_FORMAT` : all IDs (default) + `TWO_ID_FORMAT` : clientCorrelationId:clientRequestId + ### Flush Options #### 1. Azure Blob File System Flush Options @@ -778,9 +800,31 @@ The following configs are related to read and write operations. `fs.azure.io.retry.max.retries`: Sets the number of retries for IO operations. Currently this is used only for the server call retry logic. Used within -AbfsClient class as part of the ExponentialRetryPolicy. The value should be +`AbfsClient` class as part of the ExponentialRetryPolicy. The value should be greater than or equal to 0. +`fs.azure.io.retry.min.backoff.interval`: Sets the minimum backoff interval for +retries of IO operations. Currently this is used only for the server call retry +logic.
Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This +value indicates the smallest interval (in milliseconds) to wait before retrying +an IO operation. The default value is 3000 (3 seconds). + +`fs.azure.io.retry.max.backoff.interval`: Sets the maximum backoff interval for +retries of IO operations. Currently this is used only for the server call retry +logic. Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This +value indicates the largest interval (in milliseconds) to wait before retrying +an IO operation. The default value is 30000 (30 seconds). + +`fs.azure.io.retry.backoff.interval`: Sets the default backoff interval for +retries of IO operations. Currently this is used only for the server call retry +logic. Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This +value is used to compute a random delta between 80% and 120% of the specified +value. This random delta is then multiplied by `2^(retryNum - 1)` (a power of +two based on the current IO retry number) and then constrained within the range +of [`fs.azure.io.retry.min.backoff.interval`, +`fs.azure.io.retry.max.backoff.interval`] to determine the amount of time to +wait before the next IO retry attempt. The default value is 3000 (3 seconds). + `fs.azure.write.request.size`: To set the write buffer size. Specify the value in bytes. The value should be between 16384 to 104857600 both inclusive (16 KB to 100 MB). The default value will be 8388608 (8 MB). @@ -837,7 +881,7 @@ when there are too many writes from the same process. ### Security Options `fs.azure.always.use.https`: Enforces to use HTTPS instead of HTTP when the flag -is made true. Irrespective of the flag, AbfsClient will use HTTPS if the secure +is made true. Irrespective of the flag, `AbfsClient` will use HTTPS if the secure scheme (ABFSS) is used or OAuth is used for authentication. By default this will be set to true. diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md index cf3b2344456af..933f86be3e896 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md @@ -448,7 +448,7 @@ use requires the presence of secret credentials, where tests may be slow, and where finding out why something failed from nothing but the test output is critical. -#### Subclasses Existing Shared Base Blasses +#### Subclasses Existing Shared Base Classes There are a set of base classes which should be extended for Azure tests and integration tests. @@ -602,7 +602,7 @@ various test combinations, it will: 2. Run tests for all combinations 3. Summarize results across all the test combination runs. -As a pre-requiste step, fill config values for test accounts and credentials +As a pre-requisite step, fill config values for test accounts and credentials needed for authentication in `src/test/resources/azure-auth-keys.xml.template` and rename as `src/test/resources/azure-auth-keys.xml`.
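The three `fs.azure.io.retry.*.backoff.interval` settings above interact, so a worked sketch may help. The following is a minimal illustration of the computation exactly as described in the doc text, not the actual `ExponentialRetryPolicy` implementation; the class and method names are hypothetical.

```java
import java.util.Random;

// Hypothetical sketch of the documented backoff computation; the real logic
// lives in org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy.
public class RetryBackoffSketch {
  private static final Random RANDOM = new Random();

  static long computeBackoffMs(int retryNum, long backoffIntervalMs,
      long minBackoffMs, long maxBackoffMs) {
    // Random delta between 80% and 120% of fs.azure.io.retry.backoff.interval.
    double delta = backoffIntervalMs * (0.8 + 0.4 * RANDOM.nextDouble());
    // Scale the delta by 2^(retryNum - 1) ...
    double backoff = Math.pow(2, retryNum - 1) * delta;
    // ... and constrain it to [min.backoff.interval, max.backoff.interval].
    return (long) Math.min(maxBackoffMs, Math.max(minBackoffMs, backoff));
  }

  public static void main(String[] args) {
    // With the documented defaults: base 3000 ms, min 3000 ms, max 30000 ms.
    for (int retry = 1; retry <= 5; retry++) {
      System.out.printf("retry %d -> wait ~%d ms%n",
          retry, computeBackoffMs(retry, 3000L, 3000L, 30000L));
    }
  }
}
```

With the defaults this yields roughly 3, 6, 12, 24 and then a capped 30 seconds across the first five retries, subject to the random 80-120% jitter.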
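The test changes that follow all use one pattern to verify these IDs end to end: register a `TracingHeaderValidator` (the test-side `Listener` implementation) on the filesystem or stream before issuing the call under test. Below is a hedged sketch of that pattern drawn from the test diffs; the helper method name and the `/testDir` path are illustrative, while the API calls themselves appear in this patch.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.AbfsConfiguration;
import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;
import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;

public class TracingValidationSketch {
  // Verify the tracing header emitted by a single mkdirs() call.
  static void assertMkdirTracing(AzureBlobFileSystem fs) throws Exception {
    AbfsConfiguration conf = fs.getAbfsStore().getAbfsConfiguration();
    fs.registerListener(new TracingHeaderValidator(
        conf.getClientCorrelationId(), // from fs.azure.client.correlationid
        fs.getFileSystemId(),          // GUID of this filesystem instance
        FSOperationType.MKDIR,         // operation code expected in the header
        false,                         // no primaryRequestId expected here
        0));                           // expected retry count
    fs.mkdirs(new Path("/testDir"));   // validator checks each request header
    fs.registerListener(null);         // detach the listener afterwards
  }
}
```

With the default `ALL_ID_FORMAT`, the header being validated is the colon-separated string built in `constructHeader()`, with optional fields such as the stream ID left empty for a filesystem-level call.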
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 3befee48493fc..ae24cf4a107cd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -26,15 +26,16 @@ import java.util.concurrent.Callable; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; @@ -45,6 +46,8 @@ import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; @@ -81,6 +84,7 @@ public abstract class AbstractAbfsIntegrationTest extends private AuthType authType; private boolean useConfiguredFileSystem = false; private boolean usingFilesystemForSASTests = false; + private static final int SHORTENED_GUID_LEN = 12; protected AbstractAbfsIntegrationTest() throws Exception { fileSystemName = TEST_CONTAINER_PREFIX + UUID.randomUUID().toString(); @@ -139,17 +143,42 @@ protected AbstractAbfsIntegrationTest() throws Exception { } } + protected boolean getIsNamespaceEnabled(AzureBlobFileSystem fs) + throws IOException { + return fs.getIsNamespaceEnabled(getTestTracingContext(fs, false)); + } + + public TracingContext getTestTracingContext(AzureBlobFileSystem fs, + boolean needsPrimaryReqId) { + String correlationId, fsId; + TracingHeaderFormat format; + if (fs == null) { + correlationId = "test-corr-id"; + fsId = "test-filesystem-id"; + format = TracingHeaderFormat.ALL_ID_FORMAT; + } else { + AbfsConfiguration abfsConf = fs.getAbfsStore().getAbfsConfiguration(); + correlationId = abfsConf.getClientCorrelationId(); + fsId = fs.getFileSystemId(); + format = abfsConf.getTracingHeaderFormat(); + } + return new TracingContext(correlationId, fsId, + FSOperationType.TEST_OP, needsPrimaryReqId, format, null); + } + @Before public void setup() throws Exception { //Create filesystem first to make sure getWasbFileSystem() can return an existing filesystem. 
createFileSystem(); - // Only live account without namespace support can run ABFS&WASB compatibility tests - if (!isIPAddress - && (abfsConfig.getAuthType(accountName) != AuthType.SAS) - && !abfs.getIsNamespaceEnabled()) { - final URI wasbUri = new URI(abfsUrlToWasbUrl(getTestUrl(), abfsConfig.isHttpsAlwaysUsed())); + // Only live account without namespace support can run ABFS&WASB + // compatibility tests + if (!isIPAddress && (abfsConfig.getAuthType(accountName) != AuthType.SAS) + && !abfs.getIsNamespaceEnabled(getTestTracingContext( + getFileSystem(), false))) { + final URI wasbUri = new URI( + abfsUrlToWasbUrl(getTestUrl(), abfsConfig.isHttpsAlwaysUsed())); final AzureNativeFileSystemStore azureNativeFileSystemStore = new AzureNativeFileSystemStore(); @@ -180,22 +209,23 @@ public void teardown() throws Exception { if (abfs == null) { return; } + TracingContext tracingContext = getTestTracingContext(getFileSystem(), false); if (usingFilesystemForSASTests) { abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey.name()); AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); - tempFs.getAbfsStore().deleteFilesystem(); + tempFs.getAbfsStore().deleteFilesystem(tracingContext); } else if (!useConfiguredFileSystem) { // Delete all uniquely created filesystem from the account final AzureBlobFileSystemStore abfsStore = abfs.getAbfsStore(); - abfsStore.deleteFilesystem(); + abfsStore.deleteFilesystem(tracingContext); AbfsRestOperationException ex = intercept(AbfsRestOperationException.class, new Callable>() { @Override public Hashtable call() throws Exception { - return abfsStore.getFilesystemProperties(); + return abfsStore.getFilesystemProperties(tracingContext); } }); if (FILE_SYSTEM_NOT_FOUND.getStatusCode() != ex.getStatusCode()) { @@ -241,7 +271,8 @@ protected void createFilesystemForSASTests() throws Exception { // so first create temporary instance of the filesystem using SharedKey // then re-use the filesystem it creates with SAS auth instead of SharedKey. AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); - Assert.assertTrue(tempFs.exists(new Path("/"))); + ContractTestUtils.assertPathExists(tempFs, "This path should exist", + new Path("/")); abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.name()); usingFilesystemForSASTests = true; } @@ -411,7 +442,20 @@ public Path makeQualified(Path path) throws java.io.IOException { */ protected Path path(String filepath) throws IOException { return getFileSystem().makeQualified( - new Path(getTestPath(), filepath)); + new Path(getTestPath(), getUniquePath(filepath))); + } + + /** + * Generate a unique path using the given filepath. 
+ * @param filepath path string + * @return unique path created from filepath and a GUID + */ + protected Path getUniquePath(String filepath) { + if (filepath.equals("/")) { + return new Path(filepath); + } + return new Path(filepath + StringUtils + .right(UUID.randomUUID().toString(), SHORTENED_GUID_LEN)); } /** @@ -439,7 +483,8 @@ protected AbfsOutputStream createAbfsOutputStreamWithFlushEnabled( abfss.getAbfsConfiguration().setDisableOutputStreamFlush(false); return (AbfsOutputStream) abfss.createFile(path, fs.getFsStatistics(), - true, FsPermission.getDefault(), FsPermission.getUMask(fs.getConf())); + true, FsPermission.getDefault(), FsPermission.getUMask(fs.getConf()), + getTestTracingContext(fs, false)); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index a4d645899049f..f90d410343532 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -62,7 +62,9 @@ public void testContinuationTokenHavingEqualSign() throws Exception { AbfsClient abfsClient = fs.getAbfsClient(); try { - AbfsRestOperation op = abfsClient.listPath("/", true, LIST_MAX_RESULTS, "==========="); + AbfsRestOperation op = abfsClient + .listPath("/", true, LIST_MAX_RESULTS, "===========", + getTestTracingContext(fs, true)); Assert.assertTrue(false); } catch (AbfsRestOperationException ex) { Assert.assertEquals("InvalidQueryParameterValue", ex.getErrorCode().getErrorCode()); @@ -91,7 +93,7 @@ public void testUnknownHost() throws Exception { public void testListPathWithValidListMaxResultsValues() throws IOException, ExecutionException, InterruptedException { final int fileCount = 10; - final String directory = "testWithValidListMaxResultsValues"; + final Path directory = getUniquePath("testWithValidListMaxResultsValues"); createDirectoryWithNFiles(directory, fileCount); final int[] testData = {fileCount + 100, fileCount + 1, fileCount, fileCount - 1, 1}; @@ -100,7 +102,7 @@ public void testListPathWithValidListMaxResultsValues() setListMaxResults(listMaxResults); int expectedListResultsSize = listMaxResults > fileCount ? 
fileCount : listMaxResults; - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result should contain %d items when " + "listMaxResults is %d and directory contains %d items", expectedListResultsSize, listMaxResults, fileCount) @@ -112,9 +114,10 @@ public void testListPathWithValidListMaxResultsValues() public void testListPathWithValueGreaterThanServerMaximum() throws IOException, ExecutionException, InterruptedException { setListMaxResults(LIST_MAX_RESULTS_SERVER + 100); - final String directory = "testWithValueGreaterThanServerMaximum"; + final Path directory = getUniquePath( + "testWithValueGreaterThanServerMaximum"); createDirectoryWithNFiles(directory, LIST_MAX_RESULTS_SERVER + 200); - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result will contain a maximum of %d items " + "even if listMaxResults >= %d or directory " + "contains more than %d items", LIST_MAX_RESULTS_SERVER, @@ -135,7 +138,8 @@ public void testListPathWithInvalidListMaxResultsValues() throws Exception { private List listPath(String directory) throws IOException { return getFileSystem().getAbfsClient() - .listPath(directory, false, getListMaxResults(), null).getResult() + .listPath(directory, false, getListMaxResults(), null, + getTestTracingContext(getFileSystem(), true)).getResult() .getListResultSchema().paths(); } @@ -149,7 +153,7 @@ private void setListMaxResults(int listMaxResults) throws IOException { .setListMaxResults(listMaxResults); } - private void createDirectoryWithNFiles(String directory, int n) + private void createDirectoryWithNFiles(Path directory, int n) throws ExecutionException, InterruptedException { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java index c8b687d233cb5..0997b3dbd44d4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java @@ -76,7 +76,8 @@ public void testAbfsHttpCallsDurations() throws IOException { out.hflush(); // GET the file. 
- in = fs.getAbfsStore().openFileForRead(testFilePath, fs.getFsStatistics()); + in = fs.getAbfsStore().openFileForRead(testFilePath, + fs.getFsStatistics(), getTestTracingContext(fs, false)); int res = in.read(); LOG.info("Result of Read: {}", res); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java index 0e1eb769ce77b..d96f1a283609f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java @@ -68,7 +68,8 @@ public void testInitValues() throws IOException { try { outputStream = createAbfsOutputStreamWithFlushEnabled(fs, initValuesPath); - inputStream = abfss.openFileForRead(initValuesPath, fs.getFsStatistics()); + inputStream = abfss.openFileForRead(initValuesPath, fs.getFsStatistics(), + getTestTracingContext(fs, false)); AbfsInputStreamStatisticsImpl stats = (AbfsInputStreamStatisticsImpl) inputStream.getStreamStatistics(); @@ -112,7 +113,8 @@ public void testSeekStatistics() throws IOException { //Writing a default buffer in a file. out.write(defBuffer); out.hflush(); - in = abfss.openFileForRead(seekStatPath, fs.getFsStatistics()); + in = abfss.openFileForRead(seekStatPath, fs.getFsStatistics(), + getTestTracingContext(fs, false)); /* * Writing 1MB buffer to the file, this would make the fCursor(Current @@ -203,7 +205,8 @@ public void testReadStatistics() throws IOException { */ out.write(defBuffer); out.hflush(); - in = abfss.openFileForRead(readStatPath, fs.getFsStatistics()); + in = abfss.openFileForRead(readStatPath, fs.getFsStatistics(), + getTestTracingContext(fs, false)); /* * Doing file read 10 times. @@ -275,14 +278,15 @@ public void testWithNullStreamStatistics() throws IOException { out.hflush(); // AbfsRestOperation Instance required for eTag. - AbfsRestOperation abfsRestOperation = - fs.getAbfsClient().getPathStatus(nullStatFilePath.toUri().getPath(), false); + AbfsRestOperation abfsRestOperation = fs.getAbfsClient() + .getPathStatus(nullStatFilePath.toUri().getPath(), false, + getTestTracingContext(fs, false)); // AbfsInputStream with no StreamStatistics. in = new AbfsInputStream(fs.getAbfsClient(), null, - nullStatFilePath.toUri().getPath(), ONE_KB, - abfsInputStreamContext, - abfsRestOperation.getResult().getResponseHeader("ETag")); + nullStatFilePath.toUri().getPath(), ONE_KB, abfsInputStreamContext, + abfsRestOperation.getResult().getResponseHeader("ETag"), + getTestTracingContext(fs, false)); // Verifying that AbfsInputStream Operations works with null statistics. assertNotEquals("AbfsInputStream read() with null statistics should " @@ -325,7 +329,8 @@ public void testReadAheadCounters() throws IOException { out.write(defBuffer); out.close(); - in = abfss.openFileForRead(readAheadCountersPath, fs.getFsStatistics()); + in = abfss.openFileForRead(readAheadCountersPath, fs.getFsStatistics(), + getTestTracingContext(fs, false)); /* * Reading 1KB after each i * KB positions. 
Hence the reads are from 0 @@ -392,7 +397,8 @@ public void testActionHttpGetRequest() throws IOException { abfsOutputStream.hflush(); abfsInputStream = - abfss.openFileForRead(actionHttpGetRequestPath, fs.getFsStatistics()); + abfss.openFileForRead(actionHttpGetRequestPath, + fs.getFsStatistics(), getTestTracingContext(fs, false)); abfsInputStream.read(); IOStatistics ioStatistics = extractStatistics(fs); LOG.info("AbfsInputStreamStats info: {}", diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java index 6d5e4cf3bce2d..9e81a0127b6cb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.azurebfs.services.AbfsListStatusRemoteIterator; import org.apache.hadoop.fs.azurebfs.services.ListingSupport; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -64,7 +65,8 @@ public void testAbfsIteratorWithHasNext() throws Exception { ListingSupport listngSupport = Mockito.spy(getFileSystem().getAbfsStore()); RemoteIterator fsItr = new AbfsListStatusRemoteIterator( - getFileSystem().getFileStatus(testDir), listngSupport); + getFileSystem().getFileStatus(testDir), listngSupport, + getTestTracingContext(getFileSystem(), true)); Assertions.assertThat(fsItr) .describedAs("RemoteIterator should be instance of " + "AbfsListStatusRemoteIterator by default") @@ -88,7 +90,8 @@ public void testAbfsIteratorWithHasNext() throws Exception { verify(listngSupport, Mockito.atLeast(minNumberOfInvokations)) .listStatus(any(Path.class), nullable(String.class), anyList(), anyBoolean(), - nullable(String.class)); + nullable(String.class), + any(TracingContext.class)); } @Test @@ -100,7 +103,8 @@ public void testAbfsIteratorWithoutHasNext() throws Exception { ListingSupport listngSupport = Mockito.spy(getFileSystem().getAbfsStore()); RemoteIterator fsItr = new AbfsListStatusRemoteIterator( - getFileSystem().getFileStatus(testDir), listngSupport); + getFileSystem().getFileStatus(testDir), listngSupport, + getTestTracingContext(getFileSystem(), true)); Assertions.assertThat(fsItr) .describedAs("RemoteIterator should be instance of " + "AbfsListStatusRemoteIterator by default") @@ -129,7 +133,8 @@ public void testAbfsIteratorWithoutHasNext() throws Exception { verify(listngSupport, Mockito.atLeast(minNumberOfInvokations)) .listStatus(any(Path.class), nullable(String.class), anyList(), anyBoolean(), - nullable(String.class)); + nullable(String.class), + any(TracingContext.class)); } @Test @@ -205,7 +210,8 @@ public void testNextWhenNoMoreElementsPresent() throws Exception { setPageSize(10); RemoteIterator fsItr = new AbfsListStatusRemoteIterator(getFileSystem().getFileStatus(testDir), - getFileSystem().getAbfsStore()); + getFileSystem().getAbfsStore(), + getTestTracingContext(getFileSystem(), true)); fsItr = Mockito.spy(fsItr); Mockito.doReturn(false).when(fsItr).hasNext(); @@ -231,8 +237,8 @@ public void testHasNextForEmptyDir() throws Exception { @Test public void testHasNextForFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - String testFileName 
= "testFile"; - Path testFile = new Path(testFileName); + Path testFile = path("testFile"); + String testFileName = testFile.toString(); getFileSystem().create(testFile); setPageSize(10); RemoteIterator fsItr = fs.listStatusIterator(testFile); @@ -253,7 +259,7 @@ public void testIOException() throws Exception { ListingSupport lsSupport =getMockListingSupport(exceptionMessage); RemoteIterator fsItr = new AbfsListStatusRemoteIterator(getFileSystem().getFileStatus(testDir), - lsSupport); + lsSupport, getTestTracingContext(getFileSystem(), true)); Assertions.assertThatThrownBy(() -> fsItr.next()) .describedAs( @@ -276,19 +282,20 @@ public void testNonExistingPath() throws Throwable { private ListingSupport getMockListingSupport(String exceptionMessage) { return new ListingSupport() { @Override - public FileStatus[] listStatus(Path path) throws IOException { + public FileStatus[] listStatus(Path path, TracingContext tracingContext) throws IOException { return null; } @Override - public FileStatus[] listStatus(Path path, String startFrom) + public FileStatus[] listStatus(Path path, String startFrom, TracingContext tracingContext) throws IOException { return null; } @Override public String listStatus(Path path, String startFrom, - List fileStatuses, boolean fetchAll, String continuation) + List fileStatuses, boolean fetchAll, + String continuation, TracingContext tracingContext) throws IOException { throw new IOException(exceptionMessage); } @@ -297,7 +304,7 @@ public String listStatus(Path path, String startFrom, private Path createTestDirectory() throws IOException { String testDirectoryName = "testDirectory" + System.currentTimeMillis(); - Path testDirectory = new Path(testDirectoryName); + Path testDirectory = path(testDirectoryName); getFileSystem().mkdirs(testDirectory); return testDirectory; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index 52abb097ef311..b0e82444afb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -28,6 +28,10 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_READ_BUFFER_SIZE; @@ -41,7 +45,7 @@ */ @RunWith(Parameterized.class) public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; @Parameterized.Parameters(name = "Size={0}") public static Iterable sizes() { @@ -65,28 +69,73 @@ public void testReadAndWriteWithDifferentBufferSizesAndSeek() throws Exception { private void testReadWriteAndSeek(int bufferSize) throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final AbfsConfiguration abfsConfiguration = fs.getAbfsStore().getAbfsConfiguration(); - 
abfsConfiguration.setWriteBufferSize(bufferSize); abfsConfiguration.setReadBufferSize(bufferSize); - final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); - try (FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = path(TEST_PATH); + try (FSDataOutputStream stream = fs.create(testPath)) { stream.write(b); } final byte[] readBuffer = new byte[2 * bufferSize]; int result; - try (FSDataInputStream inputStream = fs.open(TEST_PATH)) { + try (FSDataInputStream inputStream = fs.open(testPath)) { + ((AbfsInputStream) inputStream.getWrappedStream()).registerListener( + new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.READ, true, 0, + ((AbfsInputStream) inputStream.getWrappedStream()) + .getStreamID())); inputStream.seek(bufferSize); result = inputStream.read(readBuffer, bufferSize, bufferSize); assertNotEquals(-1, result); + + //to test tracingHeader for case with bypassReadAhead == true + inputStream.seek(0); + byte[] temp = new byte[5]; + int t = inputStream.read(temp, 0, 1); + inputStream.seek(0); result = inputStream.read(readBuffer, 0, bufferSize); } assertNotEquals("data read in final read()", -1, result); assertArrayEquals(readBuffer, b); } + + @Test + public void testReadAheadRequestID() throws java.io.IOException { + final AzureBlobFileSystem fs = getFileSystem(); + final AbfsConfiguration abfsConfiguration = fs.getAbfsStore().getAbfsConfiguration(); + int bufferSize = MIN_BUFFER_SIZE; + abfsConfiguration.setReadBufferSize(bufferSize); + + final byte[] b = new byte[bufferSize * 10]; + new Random().nextBytes(b); + Path testPath = path(TEST_PATH); + try (FSDataOutputStream stream = fs.create(testPath)) { + ((AbfsOutputStream) stream.getWrappedStream()).registerListener( + new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.WRITE, false, 0, + ((AbfsOutputStream) stream.getWrappedStream()) + .getStreamID())); + stream.write(b); + } + + final byte[] readBuffer = new byte[4 * bufferSize]; + int result; + fs.registerListener( + new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.OPEN, false, 0)); + try (FSDataInputStream inputStream = fs.open(testPath)) { + ((AbfsInputStream) inputStream.getWrappedStream()).registerListener( + new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.READ, false, 0, + ((AbfsInputStream) inputStream.getWrappedStream()) + .getStreamID())); + result = inputStream.read(readBuffer, 0, bufferSize*4); + } + fs.registerListener(null); + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index e6b572de9717c..98162fee08e9f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -91,7 +91,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(createDirectoryPath); fs.createNonRecursive(createFilePath, FsPermission - .getDefault(), false, 1024, (short) 1, 1024, null); + .getDefault(), false, 1024, (short) 1, 1024, null).close(); Map metricMap = fs.getInstrumentationMap(); /* @@ -117,7 +117,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(path(getMethodName() + "Dir" + 
i)); fs.createNonRecursive(path(getMethodName() + i), FsPermission.getDefault(), false, 1024, (short) 1, - 1024, null); + 1024, null).close(); } metricMap = fs.getInstrumentationMap(); @@ -160,7 +160,7 @@ public void testDeleteStatistics() throws IOException { files_deleted counters. */ fs.mkdirs(createDirectoryPath); - fs.create(path(createDirectoryPath + getMethodName())); + fs.create(path(createDirectoryPath + getMethodName())).close(); fs.delete(createDirectoryPath, true); Map metricMap = fs.getInstrumentationMap(); @@ -179,7 +179,7 @@ public void testDeleteStatistics() throws IOException { directories_deleted is called or not. */ fs.mkdirs(createDirectoryPath); - fs.create(createFilePath); + fs.create(createFilePath).close(); fs.delete(createDirectoryPath, true); metricMap = fs.getInstrumentationMap(); @@ -199,9 +199,9 @@ public void testOpenAppendRenameExists() throws IOException { Path createFilePath = path(getMethodName()); Path destCreateFilePath = path(getMethodName() + "New"); - fs.create(createFilePath); - fs.open(createFilePath); - fs.append(createFilePath); + fs.create(createFilePath).close(); + fs.open(createFilePath).close(); + fs.append(createFilePath).close(); assertTrue(fs.rename(createFilePath, destCreateFilePath)); Map metricMap = fs.getInstrumentationMap(); @@ -225,11 +225,11 @@ public void testOpenAppendRenameExists() throws IOException { //re-initialising Abfs to reset statistic values. fs.initialize(fs.getUri(), fs.getConf()); - fs.create(destCreateFilePath); + fs.create(destCreateFilePath).close(); for (int i = 0; i < NUMBER_OF_OPS; i++) { fs.open(destCreateFilePath); - fs.append(destCreateFilePath); + fs.append(destCreateFilePath).close(); } metricMap = fs.getInstrumentationMap(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 7eadb4bb8ff23..e5f182df2a1a2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -52,8 +52,8 @@ public void testAbfsStreamOps() throws Exception { + "Abfs"); final AzureBlobFileSystem fs = getFileSystem(); - Path smallOperationsFile = new Path("testOneReadWriteOps"); - Path largeOperationsFile = new Path("testLargeReadWriteOps"); + Path smallOperationsFile = path("testOneReadWriteOps"); + Path largeOperationsFile = path("testLargeReadWriteOps"); FileSystem.Statistics statistics = fs.getFsStatistics(); String testReadWriteOps = "test this"; statistics.reset(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index cbe19396d1277..dbe4b42a67df3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -19,12 +19,15 @@ package org.apache.hadoop.fs.azurebfs; import java.io.FileNotFoundException; +import java.io.IOException; import java.util.Random; import org.junit.Test; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import 
org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; import org.apache.hadoop.fs.contract.ContractTestUtils; /** @@ -32,8 +35,8 @@ */ public class ITestAzureBlobFileSystemAppend extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH = new Path("testfile"); - private static final Path TEST_FOLDER_PATH = new Path("testFolder"); + private static final String TEST_FILE_PATH = "testfile"; + private static final String TEST_FOLDER_PATH = "testFolder"; public ITestAzureBlobFileSystemAppend() throws Exception { super(); @@ -42,15 +45,15 @@ public ITestAzureBlobFileSystemAppend() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirShouldFail() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = path(TEST_FILE_PATH); fs.mkdirs(filePath); - fs.append(filePath, 0); + fs.append(filePath, 0).close(); } @Test public void testAppendWithLength0() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) { + try(FSDataOutputStream stream = fs.create(path(TEST_FILE_PATH))) { final byte[] b = new byte[1024]; new Random().nextBytes(b); stream.write(b, 1000, 0); @@ -62,18 +65,29 @@ public void testAppendWithLength0() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = path(TEST_FILE_PATH); ContractTestUtils.touch(fs, filePath); fs.delete(filePath, false); - fs.append(filePath); + fs.append(filePath).close(); } @Test(expected = FileNotFoundException.class) public void testAppendDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = TEST_FOLDER_PATH; + final Path folderPath = path(TEST_FOLDER_PATH); fs.mkdirs(folderPath); - fs.append(folderPath); + fs.append(folderPath).close(); + } + + @Test + public void testTracingForAppend() throws IOException { + AzureBlobFileSystem fs = getFileSystem(); + Path testPath = path(TEST_FILE_PATH); + fs.create(testPath).close(); + fs.registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.APPEND, false, 0)); + fs.append(testPath, 10); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java index cc86923357aa5..beb7d0ebaaa8e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java @@ -21,11 +21,14 @@ import java.io.IOException; import java.util.EnumSet; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.XAttrSetFlag; import org.junit.Assume; import org.junit.Test; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; + import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** @@ -42,7 +45,8 @@ public ITestAzureBlobFileSystemAttributes() throws Exception { @Test public void testSetGetXAttr() throws 
Exception { AzureBlobFileSystem fs = getFileSystem(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + AbfsConfiguration conf = fs.getAbfsStore().getAbfsConfiguration(); + Assume.assumeTrue(getIsNamespaceEnabled(fs)); byte[] attributeValue1 = fs.getAbfsStore().encodeAttribute("hi"); byte[] attributeValue2 = fs.getAbfsStore().encodeAttribute("你好"); @@ -55,8 +59,13 @@ public void testSetGetXAttr() throws Exception { assertNull(fs.getXAttr(testFile, attributeName1)); // after setting the xAttr on the file, the value should be retrievable + fs.registerListener( + new TracingHeaderValidator(conf.getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.SET_ATTR, true, 0)); fs.setXAttr(testFile, attributeName1, attributeValue1); + fs.setListenerOperation(FSOperationType.GET_ATTR); assertArrayEquals(attributeValue1, fs.getXAttr(testFile, attributeName1)); + fs.registerListener(null); // after setting a second xAttr on the file, the first xAttr values should not be overwritten fs.setXAttr(testFile, attributeName2, attributeValue2); @@ -67,7 +76,7 @@ public void testSetGetXAttr() throws Exception { @Test public void testSetGetXAttrCreateReplace() throws Exception { AzureBlobFileSystem fs = getFileSystem(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(fs)); byte[] attributeValue = fs.getAbfsStore().encodeAttribute("one"); String attributeName = "user.someAttribute"; Path testFile = path("createReplaceXAttr"); @@ -84,7 +93,7 @@ public void testSetGetXAttrCreateReplace() throws Exception { @Test public void testSetGetXAttrReplace() throws Exception { AzureBlobFileSystem fs = getFileSystem(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(fs)); byte[] attributeValue1 = fs.getAbfsStore().encodeAttribute("one"); byte[] attributeValue2 = fs.getAbfsStore().encodeAttribute("two"); String attributeName = "user.someAttribute"; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index 1278e652b3bbb..338cf8476afd8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -99,7 +99,7 @@ public void testSASTokenProviderEmptySASToken() throws Exception { this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, () -> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -114,7 +114,7 @@ public void testSASTokenProviderNullSASToken() throws Exception { testFs.initialize(fs.getUri(), this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, ()-> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -209,55 +209,55 @@ public void testGetFileStatusUnauthorized() throws Exception { @Test public void testSetOwnerUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.SetOwner, true); } @Test public void testSetPermissionUnauthorized() throws Exception { - 
Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.SetPermissions, true); } @Test public void testModifyAclEntriesUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.ModifyAclEntries, true); } @Test public void testRemoveAclEntriesUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.RemoveAclEntries, true); } @Test public void testRemoveDefaultAclUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.RemoveDefaultAcl, true); } @Test public void testRemoveAclUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.RemoveAcl, true); } @Test public void testSetAclUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.SetAcl, true); } @Test public void testGetAclStatusAuthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.GetAcl, false); } @Test public void testGetAclStatusUnauthorized() throws Exception { - Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); runTest(FileSystemOperations.GetAcl, true); } @@ -297,7 +297,7 @@ private void executeOp(Path reqPath, AzureBlobFileSystem fs, fs.listStatus(reqPath); break; case CreatePath: - fs.create(reqPath); + fs.create(reqPath).close(); break; case RenamePath: fs.rename(reqPath, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index ffa3c8cf7d8fa..2941b96fefa2e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -43,20 +43,23 @@ public ITestAzureBlobFileSystemBackCompat() throws Exception { public void testBlobBackCompat() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); Assume.assumeFalse("This test does not support namespace enabled account", - this.getFileSystem().getIsNamespaceEnabled()); + getIsNamespaceEnabled(getFileSystem())); String storageConnectionString = getBlobConnectionString(); CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); CloudBlobClient blobClient = storageAccount.createCloudBlobClient(); CloudBlobContainer container = blobClient.getContainerReference(this.getFileSystemName()); container.createIfNotExists(); - CloudBlockBlob blockBlob = container.getBlockBlobReference("test/10/10/10"); + Path testPath = getUniquePath("test"); + CloudBlockBlob blockBlob = container + .getBlockBlobReference(testPath + "/10/10/10"); 
blockBlob.uploadText(""); - blockBlob = container.getBlockBlobReference("test/10/123/3/2/1/3"); + blockBlob = container.getBlockBlobReference(testPath + "/10/123/3/2/1/3"); blockBlob.uploadText(""); - FileStatus[] fileStatuses = fs.listStatus(new Path("/test/10/")); + FileStatus[] fileStatuses = fs + .listStatus(new Path(String.format("/%s/10/", testPath))); assertEquals(2, fileStatuses.length); assertEquals("10", fileStatuses[0].getPath().getName()); assertTrue(fileStatuses[0].isDirectory()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index e52071d92e574..3b31fc728bccf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -188,7 +188,9 @@ public void testCheckAccessForAccountWithoutNS() throws Exception { // acts as noop AzureBlobFileSystemStore mockAbfsStore = Mockito.mock(AzureBlobFileSystemStore.class); - Mockito.when(mockAbfsStore.getIsNamespaceEnabled()).thenReturn(true); + Mockito.when(mockAbfsStore + .getIsNamespaceEnabled(getTestTracingContext(getFileSystem(), false))) + .thenReturn(true); Field abfsStoreField = AzureBlobFileSystem.class.getDeclaredField( "abfsStore"); abfsStoreField.setAccessible(true); @@ -350,7 +352,8 @@ private void modifyAcl(Path file, String uid, FsAction fsAction) private Path setupTestDirectoryAndUserAccess(String testFileName, FsAction fsAction) throws Exception { - Path file = new Path(TEST_FOLDER_PATH + testFileName); + Path testPath = path(TEST_FOLDER_PATH); + Path file = new Path(testPath + testFileName); file = this.superUserFs.makeQualified(file); this.superUserFs.delete(file, true); this.superUserFs.create(file); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java index 917ee9ce1b07e..aabaf82b622a8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java @@ -53,7 +53,7 @@ public void testCopyFromLocalFileSystem() throws Exception { localFs.delete(localFilePath, true); try { writeString(localFs, localFilePath, "Testing"); - Path dstPath = new Path("copiedFromLocal"); + Path dstPath = path("copiedFromLocal"); assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false, fs.getConf())); assertIsFile(fs, dstPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 09304d1ec218d..2f23ac5c5c708 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -37,12 +37,15 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
index 09304d1ec218d..2f23ac5c5c708 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
@@ -37,12 +37,15 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConcurrentWriteOperationDetectedException;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
-import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
 import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
+import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 
 import static java.net.HttpURLConnection.HTTP_CONFLICT;
 import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
@@ -66,7 +69,7 @@ public class ITestAzureBlobFileSystemCreate extends
     AbstractAbfsIntegrationTest {
   private static final Path TEST_FILE_PATH = new Path("testfile");
-  private static final Path TEST_FOLDER_PATH = new Path("testFolder");
+  private static final String TEST_FOLDER_PATH = "testFolder";
   private static final String TEST_CHILD_FILE = "childFile";
 
   public ITestAzureBlobFileSystemCreate() throws Exception {
@@ -89,13 +92,19 @@ public void testEnsureFileCreatedImmediately() throws Exception {
   @SuppressWarnings("deprecation")
   public void testCreateNonRecursive() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testFile = new Path(testFolderPath, TEST_CHILD_FILE);
     try {
       fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null);
       fail("Should've thrown");
     } catch (FileNotFoundException expected) {
     }
-    fs.mkdirs(TEST_FOLDER_PATH);
+    fs.registerListener(new TracingHeaderValidator(
+        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
+        fs.getFileSystemId(), FSOperationType.MKDIR, false, 0));
+    fs.mkdirs(testFolderPath);
+    fs.registerListener(null);
+
     fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
       .close();
     assertIsFile(fs, testFile);
@@ -105,13 +114,14 @@ public void testCreateNonRecursive() throws Exception {
   @SuppressWarnings("deprecation")
   public void testCreateNonRecursive1() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testFile = new Path(testFolderPath, TEST_CHILD_FILE);
     try {
       fs.createNonRecursive(testFile, FsPermission.getDefault(),
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 1024, (short) 1, 1024, null);
       fail("Should've thrown");
     } catch (FileNotFoundException expected) {
     }
-    fs.mkdirs(TEST_FOLDER_PATH);
+    fs.mkdirs(testFolderPath);
     fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
       .close();
     assertIsFile(fs, testFile);
@@ -123,13 +133,14 @@ public void testCreateNonRecursive2() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
 
-    Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testFile = new Path(testFolderPath, TEST_CHILD_FILE);
     try {
       fs.createNonRecursive(testFile, FsPermission.getDefault(), false, 1024,
          (short) 1, 1024, null);
       fail("Should've thrown");
     } catch (FileNotFoundException e) {
     }
-    fs.mkdirs(TEST_FOLDER_PATH);
+    fs.mkdirs(testFolderPath);
     fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
       .close();
     assertIsFile(fs, testFile);
@@ -141,7 +152,8 @@ public void testCreateNonRecursive2() throws Exception {
   @Test
   public void testWriteAfterClose() throws Throwable {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testPath = new Path(testFolderPath, TEST_CHILD_FILE);
     FSDataOutputStream out = fs.create(testPath);
     out.close();
     intercept(IOException.class, () -> out.write('a'));
@@ -161,7 +173,8 @@ public void testWriteAfterClose() throws Throwable {
   @Test
   public void testTryWithResources() throws Throwable {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testPath = new Path(testFolderPath, TEST_CHILD_FILE);
     try (FSDataOutputStream out = fs.create(testPath)) {
       out.write('1');
       out.hsync();
@@ -194,7 +207,8 @@ public void testTryWithResources() throws Throwable {
   @Test
   public void testFilterFSWriteAfterClose() throws Throwable {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
+    Path testFolderPath = path(TEST_FOLDER_PATH);
+    Path testPath = new Path(testFolderPath, TEST_CHILD_FILE);
     FSDataOutputStream out = fs.create(testPath);
     intercept(FileNotFoundException.class,
         () -> {
@@ -262,8 +276,12 @@ public void testCreateFileOverwrite(boolean enableConditionalCreateOverwrite)
         fs.getInstrumentationMap());
 
     // Case 2: Not Overwrite - File pre-exists
+    fs.registerListener(new TracingHeaderValidator(
+        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
+        fs.getFileSystemId(), FSOperationType.CREATE, false, 0));
     intercept(FileAlreadyExistsException.class,
         () -> fs.create(nonOverwriteFile, false));
+    fs.registerListener(null);
 
     // One request to server to create path should be issued
     createRequestCount++;
@@ -289,7 +307,11 @@ public void testCreateFileOverwrite(boolean enableConditionalCreateOverwrite)
         fs.getInstrumentationMap());
 
     // Case 4: Overwrite - File pre-exists
+    fs.registerListener(new TracingHeaderValidator(
+        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
+        fs.getFileSystemId(), FSOperationType.CREATE, true, 0));
     fs.create(overwriteFilePath, true);
+    fs.registerListener(null);
 
     if (enableConditionalCreateOverwrite) {
       // Three requests will be sent to server to create path,
@@ -346,7 +368,8 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
     AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
     abfsStore = setAzureBlobSystemStoreField(abfsStore, "client", mockClient);
-    boolean isNamespaceEnabled = abfsStore.getIsNamespaceEnabled();
+    boolean isNamespaceEnabled = abfsStore
+        .getIsNamespaceEnabled(getTestTracingContext(fs, false));
 
     AbfsRestOperation successOp = mock(
         AbfsRestOperation.class);
@@ -377,14 +400,14 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
         .createPath(any(String.class), eq(true), eq(false),
             isNamespaceEnabled ? any(String.class) : eq(null),
             isNamespaceEnabled ? any(String.class) : eq(null),
-            any(boolean.class), eq(null));
+            any(boolean.class), eq(null), any(TracingContext.class));
 
     doThrow(fileNotFoundResponseEx) // Scn1: GFS fails with Http404
         .doThrow(serverErrorResponseEx) // Scn2: GFS fails with Http500
         .doReturn(successOp) // Scn3: create overwrite=true fails with Http412
         .doReturn(successOp) // Scn4: create overwrite=true fails with Http500
         .when(mockClient)
-        .getPathStatus(any(String.class), eq(false));
+        .getPathStatus(any(String.class), eq(false), any(TracingContext.class));
 
     // mock for overwrite=true
     doThrow(
@@ -395,7 +418,7 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
         .createPath(any(String.class), eq(true), eq(true),
             isNamespaceEnabled ? any(String.class) : eq(null),
             isNamespaceEnabled ? any(String.class) : eq(null),
-            any(boolean.class), eq(null));
+            any(boolean.class), eq(null), any(TracingContext.class));
 
     // Scn1: GFS fails with Http404
     // Sequence of events expected:
@@ -461,7 +484,8 @@ private void validateCreateFileException(final Class ex
     Path testPath = new Path("testFile");
     intercept(
         exceptionClass,
-        () -> abfsStore.createFile(testPath, null, true, permission, umask));
+        () -> abfsStore.createFile(testPath, null, true, permission, umask,
+            getTestTracingContext(getFileSystem(), true)));
   }
 
   private AbfsRestOperationException getMockAbfsRestOperationException(int status) {
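The registerListener/TracingHeaderValidator pairing above is the recurring validation pattern in this patch: a listener is attached around exactly one FileSystem call so it can assert the fields of the x-ms-client-request-id header for that request. A minimal sketch of the pattern, using the helper names from these tests (the directory name is hypothetical; the constructor arguments are correlation ID, filesystem ID, operation type, whether a primary request ID is expected, and the retry count):

    // Sketch: validate tracing headers for a single mkdirs() call, then
    // detach the listener so later operations are not checked.
    AzureBlobFileSystem fs = getFileSystem();
    fs.registerListener(new TracingHeaderValidator(
        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
        fs.getFileSystemId(), FSOperationType.MKDIR, false, 0));
    fs.mkdirs(path("sampleDir")); // hypothetical test directory
    fs.registerListener(null);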
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java
index 50ce257b4a844..5cba89ac4a5a6 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java
@@ -53,6 +53,8 @@
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE;
 import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH;
 import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
 import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
@@ -223,15 +225,15 @@ public void testRename() throws Exception {
       stream.writeBytes("hello");
     }
 
-    assertFalse(fs.exists(destinationPath));
+    assertPathDoesNotExist(fs, "This path should not exist", destinationPath);
     fs.rename(sourcePath, destinationPath);
-    assertFalse(fs.exists(sourcePath));
-    assertTrue(fs.exists(destinationPath));
+    assertPathDoesNotExist(fs, "This path should not exist", sourcePath);
+    assertPathExists(fs, "This path should exist", destinationPath);
 
-    assertFalse(fs.exists(destinationDir));
+    assertPathDoesNotExist(fs, "This path should not exist", destinationDir);
     fs.rename(sourceDir, destinationDir);
-    assertFalse(fs.exists(sourceDir));
-    assertTrue(fs.exists(destinationDir));
+    assertPathDoesNotExist(fs, "This path should not exist", sourceDir);
+    assertPathExists(fs, "This path should exist", destinationDir);
   }
 
   @Test
@@ -246,13 +248,13 @@ public void testDelete() throws Exception {
       stream.writeBytes("hello");
     }
 
-    assertTrue(fs.exists(filePath));
+    assertPathExists(fs, "This path should exist", filePath);
     fs.delete(filePath, false);
-    assertFalse(fs.exists(filePath));
+    assertPathDoesNotExist(fs, "This path should not exist", filePath);
 
-    assertTrue(fs.exists(dirPath));
+    assertPathExists(fs, "This path should exist", dirPath);
     fs.delete(dirPath, false);
-    assertFalse(fs.exists(dirPath));
+    assertPathDoesNotExist(fs, "This path should not exist", dirPath);
   }
 
   @Test
@@ -267,11 +269,11 @@ public void testDeleteRecursive() throws Exception {
       stream.writeBytes("hello");
     }
 
-    assertTrue(fs.exists(dirPath));
-    assertTrue(fs.exists(filePath));
+    assertPathExists(fs, "This path should exist", dirPath);
+    assertPathExists(fs, "This path should exist", filePath);
     fs.delete(dirPath, true);
-    assertFalse(fs.exists(filePath));
-    assertFalse(fs.exists(dirPath));
+    assertPathDoesNotExist(fs, "This path should not exist", filePath);
+    assertPathDoesNotExist(fs, "This path should not exist", dirPath);
   }
 
   @Test
@@ -395,10 +397,11 @@ public void testProperties() throws Exception {
   @Test
   public void testSignatureMask() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    String src = "/testABC/test.xt";
-    fs.create(new Path(src));
+    String src = String.format("/testABC/test%s.xt", UUID.randomUUID());
+    fs.create(new Path(src)).close();
     AbfsRestOperation abfsHttpRestOperation = fs.getAbfsClient()
-        .renamePath(src, "/testABC" + "/abc.txt", null);
+        .renamePath(src, "/testABC" + "/abc.txt", null,
+            getTestTracingContext(fs, false));
     AbfsHttpOperation result = abfsHttpRestOperation.getResult();
     String url = result.getSignatureMaskedUrl();
     String encodedUrl = result.getSignatureMaskedEncodedUrl();
@@ -414,7 +417,8 @@ public void testSignatureMask() throws Exception {
   public void testSignatureMaskOnExceptionMessage() throws Exception {
     intercept(IOException.class, "sig=XXXX",
         () -> getFileSystem().getAbfsClient()
-            .renamePath("testABC/test.xt", "testABC/abc.txt", null));
+            .renamePath("testABC/test.xt", "testABC/abc.txt", null,
+                getTestTracingContext(getFileSystem(), false)));
   }
 
   @Test
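With this patch every AbfsClient method takes an explicit TracingContext, so tests that drive the client directly obtain one from the getTestTracingContext helper. Roughly, the new call shape for a client-level rename looks like this (a sketch; the source and destination paths are hypothetical):

    // Sketch: client-level rename carrying a test tracing context; the
    // result URL has any SAS "sig" query value masked.
    AbfsRestOperation op = fs.getAbfsClient().renamePath(
        "/testABC/src.txt", "/testABC/dst.txt", null,
        getTestTracingContext(fs, false));
    String maskedUrl = op.getResult().getSignatureMaskedUrl();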
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
index 9bd82dbb03df6..b9b846f52d2dd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
@@ -30,6 +30,7 @@
 import org.junit.Assume;
 import org.junit.Test;
 
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
@@ -37,6 +38,8 @@
 import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.TestAbfsPerfTracker;
 import org.apache.hadoop.fs.azurebfs.utils.TestMockHelpers;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -76,12 +79,13 @@ public ITestAzureBlobFileSystemDelete() throws Exception {
   public void testDeleteRoot() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
 
-    fs.mkdirs(new Path("/testFolder0"));
-    fs.mkdirs(new Path("/testFolder1"));
-    fs.mkdirs(new Path("/testFolder2"));
-    touch(new Path("/testFolder1/testfile"));
-    touch(new Path("/testFolder1/testfile2"));
-    touch(new Path("/testFolder1/testfile3"));
+    Path testPath = path("/testFolder");
+    fs.mkdirs(new Path(testPath + "_0"));
+    fs.mkdirs(new Path(testPath + "_1"));
+    fs.mkdirs(new Path(testPath + "_2"));
+    touch(new Path(testPath + "_1/testfile"));
+    touch(new Path(testPath + "_1/testfile2"));
+    touch(new Path(testPath + "_1/testfile3"));
 
     Path root = new Path("/");
     FileStatus[] ls = fs.listStatus(root);
@@ -95,7 +99,7 @@ public void testDeleteRoot() throws Exception {
   @Test()
   public void testOpenFileAfterDelete() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testfile = new Path("/testFile");
+    Path testfile = path("/testFile");
     touch(testfile);
     assertDeleted(fs, testfile, false);
 
@@ -106,7 +110,7 @@ public void testOpenFileAfterDelete() throws Exception {
   @Test
   public void testEnsureFileIsDeleted() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testfile = new Path("testfile");
+    Path testfile = path("testfile");
     touch(testfile);
     assertDeleted(fs, testfile, false);
     assertPathDoesNotExist(fs, "deleted", testfile);
@@ -115,10 +119,10 @@ public void testEnsureFileIsDeleted() throws Exception {
   @Test
   public void testDeleteDirectory() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path dir = new Path("testfile");
+    Path dir = path("testfile");
     fs.mkdirs(dir);
-    fs.mkdirs(new Path("testfile/test1"));
-    fs.mkdirs(new Path("testfile/test1/test2"));
+    fs.mkdirs(new Path(dir + "/test1"));
+    fs.mkdirs(new Path(dir + "/test1/test2"));
 
     assertDeleted(fs, dir, true);
     assertPathDoesNotExist(fs, "deleted", dir);
@@ -130,8 +134,9 @@ public void testDeleteFirstLevelDirectory() throws Exception {
     final List<Future<Void>> tasks = new ArrayList<>();
 
     ExecutorService es = Executors.newFixedThreadPool(10);
+    Path dir = path("/test");
     for (int i = 0; i < 1000; i++) {
-      final Path fileName = new Path("/test/" + i);
+      final Path fileName = new Path(dir + "/" + i);
       Callable<Void> callable = new Callable<Void>() {
         @Override
         public Void call() throws Exception {
@@ -148,10 +153,13 @@ public Void call() throws Exception {
     }
 
     es.shutdownNow();
-    Path dir = new Path("/test");
+    fs.registerListener(new TracingHeaderValidator(
+        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
+        fs.getFileSystemId(), FSOperationType.DELETE, false, 0));
 
     // first try a non-recursive delete, expect failure
     intercept(FileAlreadyExistsException.class,
         () -> fs.delete(dir, false));
+    fs.registerListener(null);
     assertDeleted(fs, dir, true);
     assertPathDoesNotExist(fs, "deleted", dir);
 
@@ -222,13 +230,14 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
     intercept(AbfsRestOperationException.class,
         () -> fs.getAbfsStore().delete(
             new Path("/NonExistingPath"),
-            false));
+            false, getTestTracingContext(fs, false)));
 
     intercept(AbfsRestOperationException.class,
         () -> client.deletePath(
             "/NonExistingPath",
             false,
-            null));
+            null,
+            getTestTracingContext(fs, true)));
 
     // mock idempotency check to mimic retried case
     AbfsClient mockClient = TestAbfsClient.getMockAbfsClient(
@@ -241,7 +250,8 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
         mockStore,
         "abfsPerfTracker",
         TestAbfsPerfTracker.getAPerfTrackerInstance(this.getConfiguration()));
-    doCallRealMethod().when(mockStore).delete(new Path("/NonExistingPath"), false);
+    doCallRealMethod().when(mockStore).delete(new Path("/NonExistingPath"),
+        false, getTestTracingContext(fs, false));
 
     // Case 2: Mimic retried case
     // Idempotency check on Delete always returns success
@@ -252,13 +262,15 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
     idempotencyRetOp.hardSetResult(HTTP_OK);
 
     doReturn(idempotencyRetOp).when(mockClient).deleteIdempotencyCheckOp(any());
-    when(mockClient.deletePath("/NonExistingPath", false,
-        null)).thenCallRealMethod();
+    TracingContext tracingContext = getTestTracingContext(fs, false);
+    when(mockClient.deletePath("/NonExistingPath", false, null, tracingContext))
+        .thenCallRealMethod();
 
     Assertions.assertThat(mockClient.deletePath(
         "/NonExistingPath",
         false,
-        null)
+        null,
+        tracingContext)
         .getResult()
         .getStatusCode())
         .describedAs("Idempotency check reports successful "
@@ -266,7 +278,7 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
         .isEqualTo(idempotencyRetOp.getResult().getStatusCode());
 
     // Call from AzureBlobFileSystemStore should not fail either
-    mockStore.delete(new Path("/NonExistingPath"), false);
+    mockStore.delete(new Path("/NonExistingPath"), false, getTestTracingContext(fs, false));
   }
 }
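One detail worth noting in the mocked-delete test above: the when(...) stub and the verified call share a single TracingContext instance. With concrete (non-matcher) arguments Mockito matches by equality, so the test keeps the same reference to guarantee the stub is hit. Condensed:

    // Sketch of the stubbing pattern used above.
    TracingContext tracingContext = getTestTracingContext(fs, false);
    when(mockClient.deletePath("/NonExistingPath", false, null, tracingContext))
        .thenCallRealMethod();
    // The same instance must be passed again for the stub to apply:
    int status = mockClient
        .deletePath("/NonExistingPath", false, null, tracingContext)
        .getResult().getStatusCode();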
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
index 05c3855f5c89d..56016a39470e4 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
@@ -33,6 +33,8 @@
 import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
 
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_TOLERATE_CONCURRENT_APPEND;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 
 /**
@@ -52,14 +54,14 @@ public ITestAzureBlobFileSystemE2E() throws Exception {
   @Test
   public void testWriteOneByteToFile() throws Exception {
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
     testWriteOneByteToFile(testFilePath);
   }
 
   @Test
   public void testReadWriteBytesToFile() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
     testWriteOneByteToFile(testFilePath);
     try(FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE)) {
@@ -78,7 +80,7 @@ public void testOOBWritesAndReadFail() throws Exception {
     final byte[] b = new byte[2 * readBufferSize];
     new Random().nextBytes(b);
 
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
     try(FSDataOutputStream writeStream = fs.create(testFilePath)) {
       writeStream.write(b);
       writeStream.flush();
@@ -107,7 +109,7 @@ public void testOOBWritesAndReadSucceed() throws Exception {
     byte[] bytesToRead = new byte[readBufferSize];
     final byte[] b = new byte[2 * readBufferSize];
     new Random().nextBytes(b);
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
 
     try (FSDataOutputStream writeStream = fs.create(testFilePath)) {
       writeStream.write(b);
@@ -130,7 +132,7 @@ public void testOOBWritesAndReadSucceed() throws Exception {
   @Test
   public void testWriteWithBufferOffset() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
 
     final byte[] b = new byte[1024 * 1000];
     new Random().nextBytes(b);
@@ -151,7 +153,7 @@ public void testWriteWithBufferOffset() throws Exception {
   @Test
   public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
 
     final byte[] writeBuffer = new byte[5 * 1000 * 1024];
     new Random().nextBytes(writeBuffer);
@@ -171,50 +173,51 @@ public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception {
   @Test
   public void testReadWithFileNotFoundException() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
     testWriteOneByteToFile(testFilePath);
 
-    FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE);
-    fs.delete(testFilePath, true);
-    assertFalse(fs.exists(testFilePath));
+    try (FSDataInputStream inputStream = fs.open(testFilePath,
+        TEST_DEFAULT_BUFFER_SIZE)) {
+      fs.delete(testFilePath, true);
+      assertPathDoesNotExist(fs, "This path should not exist", testFilePath);
 
-    intercept(FileNotFoundException.class,
-        () -> inputStream.read(new byte[1]));
+      intercept(FileNotFoundException.class, () -> inputStream.read(new byte[1]));
+    }
   }
 
   @Test
   public void testWriteWithFileNotFoundException() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
 
-    FSDataOutputStream stream = fs.create(testFilePath);
-    assertTrue(fs.exists(testFilePath));
-    stream.write(TEST_BYTE);
+    try (FSDataOutputStream stream = fs.create(testFilePath)) {
+      assertPathExists(fs, "Path should exist", testFilePath);
+      stream.write(TEST_BYTE);
 
-    fs.delete(testFilePath, true);
-    assertFalse(fs.exists(testFilePath));
+      fs.delete(testFilePath, true);
+      assertPathDoesNotExist(fs, "This path should not exist", testFilePath);
 
-    // trigger append call
-    intercept(FileNotFoundException.class,
-        () -> stream.close());
+      // trigger append call
+      intercept(FileNotFoundException.class, () -> stream.close());
+    }
   }
 
   @Test
   public void testFlushWithFileNotFoundException() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    final Path testFilePath = new Path(methodName.getMethodName());
+    final Path testFilePath = path(methodName.getMethodName());
     if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) {
       return;
     }
 
-    FSDataOutputStream stream = fs.create(testFilePath);
-    assertTrue(fs.exists(testFilePath));
+    try (FSDataOutputStream stream = fs.create(testFilePath)) {
+      assertPathExists(fs, "This path should exist", testFilePath);
 
-    fs.delete(testFilePath, true);
-    assertFalse(fs.exists(testFilePath));
+      fs.delete(testFilePath, true);
+      assertPathDoesNotExist(fs, "This path should not exist", testFilePath);
 
-    intercept(FileNotFoundException.class,
-        () -> stream.close());
+      intercept(FileNotFoundException.class, () -> stream.close());
+    }
   }
 
   private void testWriteOneByteToFile(Path testFilePath) throws Exception {
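The E2E rewrites move streams into try-with-resources blocks while still asserting that the flush triggered by close() fails once the backing file is deleted. Condensed, the pattern is (a sketch using the names from the tests above):

    try (FSDataOutputStream stream = fs.create(testFilePath)) {
      stream.write(TEST_BYTE);
      fs.delete(testFilePath, true);
      // close() flushes the pending append, which must now fail:
      intercept(FileNotFoundException.class, () -> stream.close());
    }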
path("childfile1.txt"); long createStartTime = System.currentTimeMillis(); long minCreateStartTime = (createStartTime / 1000) * 1000 - 1; // Dividing and multiplying by 1000 to make last 3 digits 0. // It is observed that modification time is returned with last 3 // digits 0 always. - fs.create(testFilePath); + fs.create(testFilePath).close(); long createEndTime = System.currentTimeMillis(); FileStatus fStat = fs.getFileStatus(testFilePath); long lastModifiedTime = fStat.getModificationTime(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index ec33257060278..d27f9fa62194d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -29,8 +29,12 @@ import java.util.concurrent.Future; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; import org.hamcrest.core.IsEqual; import org.hamcrest.core.IsNot; import org.junit.Test; @@ -41,6 +45,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_APPEND_BLOB_KEY; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasStreamCapabilities; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksStreamCapabilities; @@ -301,6 +306,25 @@ public void testHsyncWithFlushEnabled() throws Exception { } } + @Test + public void testTracingHeaderForAppendBlob() throws Exception { + Configuration config = new Configuration(this.getRawConfiguration()); + config.set(FS_AZURE_APPEND_BLOB_KEY, "abfss:/"); + config.set(TestConfigurationKeys.FS_AZURE_TEST_APPENDBLOB_ENABLED, "true"); + AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem + .newInstance(config); + + byte[] buf = new byte[10]; + new Random().nextBytes(buf); + try (FSDataOutputStream out = fs.create(new Path("/testFile"))) { + ((AbfsOutputStream) out.getWrappedStream()).registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.WRITE, false, 0, + ((AbfsOutputStream) out.getWrappedStream()).getStreamID())); + out.write(buf); + out.hsync(); + } + } + @Test public void testStreamCapabilitiesWithFlushDisabled() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java index 9857da8957e22..2894abe4d0e2b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java @@ -26,13 +26,18 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java
index 9857da8957e22..2894abe4d0e2b 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java
@@ -26,13 +26,18 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsLease;
 import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream;
+import org.apache.hadoop.fs.azurebfs.utils.Listener;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doThrow;
@@ -210,7 +215,11 @@ public void testWriteAfterBreakLease() throws Exception {
     out.write(0);
     out.hsync();
 
+    fs.registerListener(new TracingHeaderValidator(
+        getConfiguration().getClientCorrelationId(), fs.getFileSystemId(),
+        FSOperationType.BREAK_LEASE, false, 0));
     fs.breakLease(testFilePath);
+    fs.registerListener(null);
 
     LambdaTestUtils.intercept(IOException.class, ERR_LEASE_EXPIRED, () -> {
       out.write(1);
@@ -308,29 +317,38 @@ public void testAcquireRetry() throws Exception {
     final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1);
     fs.mkdirs(testFilePath.getParent());
     fs.createNewFile(testFilePath);
-
-    AbfsLease lease = new AbfsLease(fs.getAbfsClient(), testFilePath.toUri().getPath());
+    TracingContext tracingContext = getTestTracingContext(fs, true);
+    Listener listener = new TracingHeaderValidator(
+        getConfiguration().getClientCorrelationId(), fs.getFileSystemId(),
+        FSOperationType.TEST_OP, true, 0);
+    tracingContext.setListener(listener);
+
+    AbfsLease lease = new AbfsLease(fs.getAbfsClient(),
+        testFilePath.toUri().getPath(), tracingContext);
     Assert.assertNotNull("Did not successfully lease file", lease.getLeaseID());
+    listener.setOperation(FSOperationType.RELEASE_LEASE);
     lease.free();
+    lease.getTracingContext().setListener(null);
     Assert.assertEquals("Unexpected acquire retry count", 0, lease.getAcquireRetryCount());
 
     AbfsClient mockClient = spy(fs.getAbfsClient());
 
     doThrow(new AbfsLease.LeaseException("failed to acquire 1"))
         .doThrow(new AbfsLease.LeaseException("failed to acquire 2"))
-        .doCallRealMethod()
-        .when(mockClient).acquireLease(anyString(), anyInt());
+        .doCallRealMethod().when(mockClient)
+        .acquireLease(anyString(), anyInt(), any(TracingContext.class));
 
-    lease = new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1);
+    lease = new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1, tracingContext);
     Assert.assertNotNull("Acquire lease should have retried", lease.getLeaseID());
     lease.free();
     Assert.assertEquals("Unexpected acquire retry count", 2, lease.getAcquireRetryCount());
 
-    doThrow(new AbfsLease.LeaseException("failed to acquire"))
-        .when(mockClient).acquireLease(anyString(), anyInt());
+    doThrow(new AbfsLease.LeaseException("failed to acquire")).when(mockClient)
+        .acquireLease(anyString(), anyInt(), any(TracingContext.class));
 
     LambdaTestUtils.intercept(AzureBlobFileSystemException.class, () -> {
-      new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1);
+      new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1,
+          tracingContext);
    });
  }
}
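In testAcquireRetry the same listener is reused across successive lease calls by switching the operation it expects before each one. The sequence, condensed from the hunk above:

    TracingContext tracingContext = getTestTracingContext(fs, true);
    Listener listener = new TracingHeaderValidator(
        getConfiguration().getClientCorrelationId(), fs.getFileSystemId(),
        FSOperationType.TEST_OP, true, 0);
    tracingContext.setListener(listener);

    AbfsLease lease = new AbfsLease(fs.getAbfsClient(),
        testFilePath.toUri().getPath(), tracingContext); // checked as TEST_OP
    listener.setOperation(FSOperationType.RELEASE_LEASE);
    lease.free();                                        // checked as RELEASE_LEASE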
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
index 31f92d2bd3890..8d1330b5ea7dd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
@@ -35,6 +35,8 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
+import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_LIST_MAX_RESULTS;
@@ -83,6 +85,9 @@ public Void call() throws Exception {
     }
 
     es.shutdownNow();
+    fs.registerListener(
+        new TracingHeaderValidator(getConfiguration().getClientCorrelationId(),
+            fs.getFileSystemId(), FSOperationType.LISTSTATUS, true, 0));
     FileStatus[] files = fs.listStatus(new Path("/"));
     assertEquals(TEST_FILES_NUMBER, files.length /* user directory */);
   }
@@ -94,7 +99,7 @@ public Void call() throws Exception {
   @Test
   public void testListFileVsListDir() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path path = new Path("/testFile");
+    Path path = path("/testFile");
     try(FSDataOutputStream ignored = fs.create(path)) {
       FileStatus[] testFiles = fs.listStatus(path);
       assertEquals("length of test files", 1, testFiles.length);
@@ -106,19 +111,20 @@ public void testListFileVsListDir() throws Exception {
   @Test
   public void testListFileVsListDir2() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    fs.mkdirs(new Path("/testFolder"));
-    fs.mkdirs(new Path("/testFolder/testFolder2"));
-    fs.mkdirs(new Path("/testFolder/testFolder2/testFolder3"));
-    Path testFile0Path = new Path("/testFolder/testFolder2/testFolder3/testFile");
+    Path testFolder = path("/testFolder");
+    fs.mkdirs(testFolder);
+    fs.mkdirs(new Path(testFolder + "/testFolder2"));
+    fs.mkdirs(new Path(testFolder + "/testFolder2/testFolder3"));
+    Path testFile0Path = new Path(
+        testFolder + "/testFolder2/testFolder3/testFile");
     ContractTestUtils.touch(fs, testFile0Path);
 
     FileStatus[] testFiles = fs.listStatus(testFile0Path);
     assertEquals("Wrong listing size of file " + testFile0Path, 1,
         testFiles.length);
     FileStatus file0 = testFiles[0];
-    assertEquals("Wrong path for " + file0,
-        new Path(getTestUrl(), "/testFolder/testFolder2/testFolder3/testFile"),
-        file0.getPath());
+    assertEquals("Wrong path for " + file0, new Path(getTestUrl(),
+        testFolder + "/testFolder2/testFolder3/testFile"), file0.getPath());
     assertIsFileReference(file0);
   }
 
@@ -131,18 +137,18 @@ public void testListNonExistentDir() throws Exception {
   @Test
   public void testListFiles() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path testDir = new Path("/test");
+    Path testDir = path("/test");
     fs.mkdirs(testDir);
 
     FileStatus[] fileStatuses = fs.listStatus(new Path("/"));
     assertEquals(1, fileStatuses.length);
 
-    fs.mkdirs(new Path("/test/sub"));
+    fs.mkdirs(new Path(testDir + "/sub"));
     fileStatuses = fs.listStatus(testDir);
     assertEquals(1, fileStatuses.length);
     assertEquals("sub", fileStatuses[0].getPath().getName());
     assertIsDirectoryReference(fileStatuses[0]);
 
-    Path childF = fs.makeQualified(new Path("/test/f"));
+    Path childF = fs.makeQualified(new Path(testDir + "/f"));
     touch(childF);
     fileStatuses = fs.listStatus(testDir);
     assertEquals(2, fileStatuses.length);
@@ -188,7 +194,7 @@ public void testMkdirTrailingPeriodDirName() throws IOException {
     final AzureBlobFileSystem fs = getFileSystem();
 
     Path nontrailingPeriodDir = path("testTrailingDir/dir");
-    Path trailingPeriodDir = path("testTrailingDir/dir.");
+    Path trailingPeriodDir = new Path("testMkdirTrailingDir/dir.");
 
     assertMkdirs(fs, nontrailingPeriodDir);
 
@@ -207,8 +213,8 @@ public void testCreateTrailingPeriodFileName() throws IOException {
     boolean exceptionThrown = false;
     final AzureBlobFileSystem fs = getFileSystem();
 
-    Path trailingPeriodFile = path("testTrailingDir/file.");
-    Path nontrailingPeriodFile = path("testTrailingDir/file");
+    Path trailingPeriodFile = new Path("testTrailingDir/file.");
+    Path nontrailingPeriodFile = path("testCreateTrailingDir/file");
 
     createFile(fs, nontrailingPeriodFile, false, new byte[0]);
     assertPathExists(fs, "Trailing period file does not exist",
@@ -230,7 +236,7 @@ public void testRenameTrailingPeriodFile() throws IOException {
     final AzureBlobFileSystem fs = getFileSystem();
 
     Path nonTrailingPeriodFile = path("testTrailingDir/file");
-    Path trailingPeriodFile = path("testTrailingDir/file.");
+    Path trailingPeriodFile = new Path("testRenameTrailingDir/file.");
 
     createFile(fs, nonTrailingPeriodFile, false, new byte[0]);
     try {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
index 0db9529326702..bc6f35c66bc53 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
@@ -45,10 +45,11 @@ public ITestAzureBlobFileSystemMkDir() throws Exception {
   @Test
   public void testCreateDirWithExistingDir() throws Exception {
-    Assume.assumeTrue(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getFileSystem()
-        .getIsNamespaceEnabled());
+    Assume.assumeTrue(
+        DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getIsNamespaceEnabled(
+            getFileSystem()));
     final AzureBlobFileSystem fs = getFileSystem();
-    Path path = new Path("testFolder");
+    Path path = path("testFolder");
     assertMkdirs(fs, path);
     assertMkdirs(fs, path);
   }
@@ -58,12 +59,12 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception {
     Assume.assumeFalse("Ignore test until default overwrite is set to false",
         DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE);
     Assume.assumeTrue("Ignore test for Non-HNS accounts",
-        getFileSystem().getIsNamespaceEnabled());
+        getIsNamespaceEnabled(getFileSystem()));
     //execute test only for HNS account with default overwrite=false
     Configuration config = new Configuration(this.getRawConfiguration());
     config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false));
     AzureBlobFileSystem fs = getFileSystem(config);
-    Path path = new Path("testFolder");
+    Path path = path("testFolder");
     assertMkdirs(fs, path); //checks that mkdirs returns true
     long timeCreated = fs.getFileStatus(path).getModificationTime();
     assertMkdirs(fs, path); //call to existing dir should return success
@@ -74,11 +75,11 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception {
   @Test
   public void createDirWithExistingFilename() throws Exception {
     Assume.assumeFalse("Ignore test until default overwrite is set to false",
-        DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getFileSystem()
-            .getIsNamespaceEnabled());
+        DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getIsNamespaceEnabled(
+            getFileSystem()));
     final AzureBlobFileSystem fs = getFileSystem();
-    Path path = new Path("testFilePath");
-    fs.create(path);
+    Path path = path("testFilePath");
+    fs.create(path).close();
     assertTrue(fs.getFileStatus(path).isFile());
     intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path));
   }
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java
index e517f685784e7..f27e75839b73f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
 import org.apache.hadoop.fs.azurebfs.services.AuthType;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 import org.apache.hadoop.io.IOUtils;
 
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID;
@@ -44,6 +45,8 @@
 import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET;
 import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_ID;
 import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
 
 /**
  * Test Azure Oauth with Blob Data contributor role and Blob Data Reader role.
@@ -53,8 +56,8 @@ public class ITestAzureBlobFileSystemOauth extends AbstractAbfsIntegrationTest{
 
   private static final Path FILE_PATH = new Path("/testFile");
-  private static final Path EXISTED_FILE_PATH = new Path("/existedFile");
-  private static final Path EXISTED_FOLDER_PATH = new Path("/existedFolder");
+  private static final String EXISTED_FILE_PATH = "/existedFile";
+  private static final String EXISTED_FOLDER_PATH = "/existedFolder";
   private static final Logger LOG = LoggerFactory.getLogger(ITestAbfsStreamStatistics.class);
 
@@ -71,7 +74,9 @@ public void testBlobDataContributor() throws Exception {
     String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET);
     Assume.assumeTrue("Contributor client secret not provided", secret != null);
 
-    prepareFiles();
+    Path existedFilePath = path(EXISTED_FILE_PATH);
+    Path existedFolderPath = path(EXISTED_FOLDER_PATH);
+    prepareFiles(existedFilePath, existedFolderPath);
 
     final AzureBlobFileSystem fs = getBlobConributor();
 
@@ -79,39 +84,39 @@ public void testBlobDataContributor() throws Exception {
     try(FSDataOutputStream stream = fs.create(FILE_PATH)) {
       stream.write(0);
     }
-    assertTrue(fs.exists(FILE_PATH));
+    assertPathExists(fs, "This path should exist", FILE_PATH);
     FileStatus fileStatus = fs.getFileStatus(FILE_PATH);
     assertEquals(1, fileStatus.getLen());
     // delete file
     assertTrue(fs.delete(FILE_PATH, true));
-    assertFalse(fs.exists(FILE_PATH));
+    assertPathDoesNotExist(fs, "This path should not exist", FILE_PATH);
 
     // Verify Blob Data Contributor has full access to existed folder, file
 
     // READ FOLDER
-    assertTrue(fs.exists(EXISTED_FOLDER_PATH));
+    assertPathExists(fs, "This path should exist", existedFolderPath);
 
     //DELETE FOLDER
-    fs.delete(EXISTED_FOLDER_PATH, true);
-    assertFalse(fs.exists(EXISTED_FOLDER_PATH));
+    fs.delete(existedFolderPath, true);
+    assertPathDoesNotExist(fs, "This path should not exist", existedFolderPath);
 
     // READ FILE
-    try (FSDataInputStream stream = fs.open(EXISTED_FILE_PATH)) {
+    try (FSDataInputStream stream = fs.open(existedFilePath)) {
       assertTrue(stream.read() != 0);
     }
-    assertEquals(0, fs.getFileStatus(EXISTED_FILE_PATH).getLen());
+    assertEquals(0, fs.getFileStatus(existedFilePath).getLen());
 
     // WRITE FILE
-    try (FSDataOutputStream stream = fs.append(EXISTED_FILE_PATH)) {
+    try (FSDataOutputStream stream = fs.append(existedFilePath)) {
       stream.write(0);
     }
-    assertEquals(1, fs.getFileStatus(EXISTED_FILE_PATH).getLen());
+    assertEquals(1, fs.getFileStatus(existedFilePath).getLen());
 
     // REMOVE FILE
-    fs.delete(EXISTED_FILE_PATH, true);
-    assertFalse(fs.exists(EXISTED_FILE_PATH));
+    fs.delete(existedFilePath, true);
+    assertPathDoesNotExist(fs, "This path should not exist", existedFilePath);
   }
 
   /*
@@ -124,31 +129,36 @@ public void testBlobDataReader() throws Exception {
     String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET);
     Assume.assumeTrue("Reader client secret not provided", secret != null);
 
-    prepareFiles();
+    Path existedFilePath = path(EXISTED_FILE_PATH);
+    Path existedFolderPath = path(EXISTED_FOLDER_PATH);
+    prepareFiles(existedFilePath, existedFolderPath);
     final AzureBlobFileSystem fs = getBlobReader();
 
     // Use abfsStore in this test to verify the ERROR code in AbfsRestOperationException
     AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
+    TracingContext tracingContext = getTestTracingContext(fs, true);
     // TEST READ FS
-    Map<String, String> properties = abfsStore.getFilesystemProperties();
+    Map<String, String> properties = abfsStore.getFilesystemProperties(tracingContext);
     // TEST READ FOLDER
-    assertTrue(fs.exists(EXISTED_FOLDER_PATH));
+    assertPathExists(fs, "This path should exist", existedFolderPath);
 
     // TEST DELETE FOLDER
     try {
-      abfsStore.delete(EXISTED_FOLDER_PATH, true);
+      abfsStore.delete(existedFolderPath, true, tracingContext);
     } catch (AbfsRestOperationException e) {
       assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode());
     }
 
     // TEST READ FILE
-    try (InputStream inputStream = abfsStore.openFileForRead(EXISTED_FILE_PATH, null)) {
+    try (InputStream inputStream = abfsStore
+        .openFileForRead(existedFilePath, null, tracingContext)) {
       assertTrue(inputStream.read() != 0);
     }
 
     // TEST WRITE FILE
     try {
-      abfsStore.openFileForWrite(EXISTED_FILE_PATH, fs.getFsStatistics(), true);
+      abfsStore.openFileForWrite(existedFilePath, fs.getFsStatistics(), true,
+          tracingContext);
     } catch (AbfsRestOperationException e) {
       assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode());
     } finally {
@@ -157,14 +167,14 @@
   }
 
-  private void prepareFiles() throws IOException {
+  private void prepareFiles(Path existedFilePath, Path existedFolderPath) throws IOException {
     // create test files/folders to verify access control diff between
     // Blob data contributor and Blob data reader
     final AzureBlobFileSystem fs = this.getFileSystem();
-    fs.create(EXISTED_FILE_PATH);
-    assertTrue(fs.exists(EXISTED_FILE_PATH));
-    fs.mkdirs(EXISTED_FOLDER_PATH);
-    assertTrue(fs.exists(EXISTED_FOLDER_PATH));
+    fs.create(existedFilePath).close();
+    assertPathExists(fs, "This path should exist", existedFilePath);
+    fs.mkdirs(existedFolderPath);
+    assertPathExists(fs, "This path should exist", existedFolderPath);
   }
 
   private AzureBlobFileSystem getBlobConributor() throws Exception {
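Store-level operations follow the same convention as client calls: one TracingContext per FileSystem-level operation, passed explicitly. Condensed from the reader-role test above (existedFilePath is the path prepared earlier in the test):

    AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
    TracingContext tracingContext = getTestTracingContext(fs, true);
    abfsStore.getFilesystemProperties(tracingContext);
    try (InputStream in = abfsStore
        .openFileForRead(existedFilePath, null, tracingContext)) {
      in.read();
    }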
     fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "027");
 
     path = new Path(testRoot, UUID.randomUUID().toString());
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
index ef531acb2bbbc..c1f0e06439950 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
@@ -86,7 +86,7 @@ public ITestAzureBlobFileSystemRandomRead() throws Exception {
   @Test
   public void testBasicRead() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testBasicRead");
+    Path testPath = path(TEST_FILE_PREFIX + "_testBasicRead");
     assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -114,8 +114,8 @@ public void testBasicRead() throws Exception {
   @Test
   public void testRandomRead() throws Exception {
     Assume.assumeFalse("This test does not support namespace enabled account",
-        this.getFileSystem().getIsNamespaceEnabled());
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomRead");
+        getIsNamespaceEnabled(getFileSystem()));
+    Path testPath = path(TEST_FILE_PREFIX + "_testRandomRead");
     assumeHugeFileExists(testPath);
 
     try (
@@ -174,7 +174,7 @@ public void testRandomRead() throws Exception {
    */
   @Test
   public void testSeekToNewSource() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekToNewSource");
+    Path testPath = path(TEST_FILE_PREFIX + "_testSeekToNewSource");
     assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -189,7 +189,7 @@ public void testSeekToNewSource() throws Exception {
    */
   @Test
   public void testSkipBounds() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipBounds");
+    Path testPath = path(TEST_FILE_PREFIX + "_testSkipBounds");
     long testFileLength = assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -230,7 +230,7 @@ public Long call() throws Exception {
    */
   @Test
   public void testValidateSeekBounds() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testValidateSeekBounds");
+    Path testPath = path(TEST_FILE_PREFIX + "_testValidateSeekBounds");
     long testFileLength = assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -281,7 +281,7 @@ public FSDataInputStream call() throws Exception {
    */
   @Test
   public void testSeekAndAvailableAndPosition() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition");
+    Path testPath = path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition");
     long testFileLength = assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -347,7 +347,7 @@ public void testSeekAndAvailableAndPosition() throws Exception {
    */
   @Test
   public void testSkipAndAvailableAndPosition() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition");
+    Path testPath = path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition");
     long testFileLength = assumeHugeFileExists(testPath);
 
     try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) {
@@ -413,7 +413,8 @@ public void testSkipAndAvailableAndPosition() throws Exception {
   @Test
   public void testSequentialReadAfterReverseSeekPerformance() throws Exception {
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance");
+    Path testPath = path(
+        TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance");
     assumeHugeFileExists(testPath);
     final int maxAttempts = 10;
     final double maxAcceptableRatio = 1.01;
@@ -445,8 +446,8 @@ public void testSequentialReadAfterReverseSeekPerformance()
   @Ignore("HADOOP-16915")
   public void testRandomReadPerformance() throws Exception {
     Assume.assumeFalse("This test does not support namespace enabled account",
-        this.getFileSystem().getIsNamespaceEnabled());
-    Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomReadPerformance");
+        getIsNamespaceEnabled(getFileSystem()));
+    Path testPath = path(TEST_FILE_PREFIX + "_testRandomReadPerformance");
     assumeHugeFileExists(testPath);
 
     final AzureBlobFileSystem abFs = this.getFileSystem();
@@ -506,7 +507,8 @@ public void testAlwaysReadBufferSizeConfig(boolean alwaysReadBufferSizeConfigVal
     final AzureBlobFileSystem fs = createTestFile(testFile,
         16 * MEGABYTE, 1 * MEGABYTE, config);
     String eTag = fs.getAbfsClient()
-        .getPathStatus(testFile.toUri().getPath(), false)
+        .getPathStatus(testFile.toUri().getPath(), false,
+            getTestTracingContext(fs, false))
         .getResult()
         .getResponseHeader(ETAG);
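Metadata probes gain the same extra parameter. Fetching a file's ETag through the client now looks like this (a sketch; ETAG is the response-header constant used in the hunk above):

    String eTag = fs.getAbfsClient()
        .getPathStatus(testFile.toUri().getPath(), false,
            getTestTracingContext(fs, false))
        .getResult()
        .getResponseHeader(ETAG);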
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
index 2adf70ca6457d..b12af5b0826ab 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
@@ -51,6 +51,7 @@
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
@@ -95,13 +96,13 @@ public void testRenameWithPreExistingDestination() throws Exception {
   @Test
   public void testRenameFileUnderDir() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path sourceDir = new Path("/testSrc");
+    Path sourceDir = path("/testSrc");
     assertMkdirs(fs, sourceDir);
     String filename = "file1";
     Path file1 = new Path(sourceDir, filename);
     touch(file1);
 
-    Path destDir = new Path("/testDst");
+    Path destDir = path("/testDst");
     assertRenameOutcome(fs, sourceDir, destDir, true);
     FileStatus[] fileStatus = fs.listStatus(destDir);
     assertNotNull("Null file status", fileStatus);
@@ -113,14 +114,15 @@ public void testRenameFileUnderDir() throws Exception {
   @Test
   public void testRenameDirectory() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    fs.mkdirs(new Path("testDir"));
-    Path test1 = new Path("testDir/test1");
+    Path testDir = path("testDir");
+    fs.mkdirs(testDir);
+    Path test1 = new Path(testDir + "/test1");
     fs.mkdirs(test1);
-    fs.mkdirs(new Path("testDir/test1/test2"));
-    fs.mkdirs(new Path("testDir/test1/test2/test3"));
+    fs.mkdirs(new Path(testDir + "/test1/test2"));
+    fs.mkdirs(new Path(testDir + "/test1/test2/test3"));
 
     assertRenameOutcome(fs, test1,
-        new Path("testDir/test10"), true);
+        new Path(testDir + "/test10"), true);
     assertPathDoesNotExist(fs, "rename source dir", test1);
   }
 
@@ -130,8 +132,9 @@ public void testRenameFirstLevelDirectory() throws Exception {
     final List<Future<Void>> tasks = new ArrayList<>();
 
     ExecutorService es = Executors.newFixedThreadPool(10);
+    Path source = path("/test");
     for (int i = 0; i < 1000; i++) {
-      final Path fileName = new Path("/test/" + i);
+      final Path fileName = new Path(source + "/" + i);
       Callable<Void> callable = new Callable<Void>() {
         @Override
         public Void call() throws Exception {
@@ -148,8 +151,7 @@ public Void call() throws Exception {
     }
 
     es.shutdownNow();
-    Path source = new Path("/test");
-    Path dest = new Path("/renamedDir");
+    Path dest = path("/renamedDir");
     assertRenameOutcome(fs, source, dest, true);
 
     FileStatus[] files = fs.listStatus(dest);
@@ -173,14 +175,19 @@ public void testRenameRoot() throws Exception {
   @Test
   public void testPosixRenameDirectory() throws Exception {
     final AzureBlobFileSystem fs = this.getFileSystem();
-    fs.mkdirs(new Path("testDir2/test1/test2/test3"));
-    fs.mkdirs(new Path("testDir2/test4"));
-    Assert.assertTrue(fs.rename(new Path("testDir2/test1/test2/test3"), new Path("testDir2/test4")));
-    assertTrue(fs.exists(new Path("testDir2")));
-    assertTrue(fs.exists(new Path("testDir2/test1/test2")));
-    assertTrue(fs.exists(new Path("testDir2/test4")));
-    assertTrue(fs.exists(new Path("testDir2/test4/test3")));
-    assertFalse(fs.exists(new Path("testDir2/test1/test2/test3")));
+    Path testDir2 = path("testDir2");
+    fs.mkdirs(new Path(testDir2 + "/test1/test2/test3"));
+    fs.mkdirs(new Path(testDir2 + "/test4"));
+    Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4")));
+    assertPathExists(fs, "This path should exist", testDir2);
+    assertPathExists(fs, "This path should exist",
+        new Path(testDir2 + "/test1/test2"));
+    assertPathExists(fs, "This path should exist",
+        new Path(testDir2 + "/test4"));
+    assertPathExists(fs, "This path should exist",
+        new Path(testDir2 + "/test4/test3"));
+    assertPathDoesNotExist(fs, "This path should not exist",
+        new Path(testDir2 + "/test1/test2/test3"));
   }
 
   @Test
@@ -240,8 +247,8 @@ private void testRenameIdempotencyTriggerChecks(
     AbfsRestOperation idempotencyRetOp = mock(AbfsRestOperation.class);
     when(idempotencyRetOp.getResult()).thenReturn(idempotencyRetHttpOp);
     doReturn(idempotencyRetOp).when(client).renameIdempotencyCheckOp(any(),
-        any(), any());
-    when(client.renamePath(any(), any(), any())).thenCallRealMethod();
+        any(), any(), any());
+    when(client.renamePath(any(), any(), any(), any())).thenCallRealMethod();
 
     // rename on non-existing source file will trigger idempotency check
     if (idempotencyRetHttpOp.getStatusCode() == HTTP_OK) {
@@ -249,7 +256,8 @@ private void testRenameIdempotencyTriggerChecks(
       Assertions.assertThat(client.renamePath(
           "/NonExistingsourcepath",
          "/destpath",
-          null)
+          null,
+          getTestTracingContext(fs, true))
          .getResult()
          .getStatusCode())
          .describedAs("Idempotency check reports recent successful "
@@ -261,7 +269,8 @@ private void testRenameIdempotencyTriggerChecks(
          () -> client.renamePath(
              "/NonExistingsourcepath",
              "/destpath",
-              ""));
+              "",
+              getTestTracingContext(fs, true)));
    }
  }
 
@@ -304,7 +313,7 @@ private void testRenameTimeout(
      when(op.getResult()).thenReturn(http400Op);
    } else if (renameRequestStatus == HTTP_NOT_FOUND) {
      // Create the file new.
-      fs.create(destinationPath);
+      fs.create(destinationPath).close();
      when(op.getResult()).thenReturn(http404Op);
 
      if (isOldOp) {
@@ -321,7 +330,8 @@ private void testRenameTimeout(
    Assertions.assertThat(testClient.renameIdempotencyCheckOp(
        renameRequestStartTime,
        op,
-        destinationPath.toUri().getPath())
+        destinationPath.toUri().getPath(),
+        getTestTracingContext(fs, true))
        .getResult()
        .getStatusCode())
        .describedAs(assertMessage)
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java
index 044c325c8c8dc..f913da7b15ed0 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java
@@ -76,7 +76,7 @@ public ITestAzureBlobFileSystemRenameUnicode() throws Exception {
   @Test
   public void testRenameFileUsingUnicode() throws Exception {
     final AzureBlobFileSystem fs = getFileSystem();
-    Path folderPath1 = new Path(srcDir);
+    Path folderPath1 = path(srcDir);
     assertMkdirs(fs, folderPath1);
     assertIsDirectory(fs, folderPath1);
     Path filePath = new Path(folderPath1 + "/" + filename);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java
index 849bb6ba0987b..ef7f1565df73f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java
@@ -107,7 +107,8 @@ public ITestAzureBlobFileSystemStoreListStatusWithRange() throws Exception {
   @Test
   public void testListWithRange() throws IOException {
     try {
-      FileStatus[] listResult = store.listStatus(new Path(path), startFrom);
+      FileStatus[] listResult = store.listStatus(new Path(path), startFrom,
+          getTestTracingContext(fs, true));
       if (!expectedResult) {
         Assert.fail("Excepting failure with IllegalArgumentException");
       }
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java
index 74cf02a4f1f68..6b83fa8b9ebfa 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java
@@ -32,12 +32,15 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 import org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers;
+import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
 import static org.junit.Assume.assumeTrue;
 
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
@@
-88,7 +91,7 @@ public ITestAzureBlobFilesystemAcl() throws Exception { @Test public void testModifyAclEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.mkdirs(path, FsPermission.createImmutable((short) RWX_RX)); @@ -121,7 +124,7 @@ public void testModifyAclEntries() throws Exception { @Test public void testModifyAclEntriesOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -145,7 +148,7 @@ public void testModifyAclEntriesOnlyAccess() throws Exception { @Test public void testModifyAclEntriesOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -168,7 +171,7 @@ public void testModifyAclEntriesOnlyDefault() throws Exception { @Test public void testModifyAclEntriesMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -186,7 +189,7 @@ public void testModifyAclEntriesMinimal() throws Exception { @Test public void testModifyAclEntriesMinimalDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -206,7 +209,7 @@ public void testModifyAclEntriesMinimalDefault() throws Exception { @Test public void testModifyAclEntriesCustomMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -225,7 +228,7 @@ public void testModifyAclEntriesCustomMask() throws Exception { @Test public void testModifyAclEntriesStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -255,7 +258,7 @@ public void testModifyAclEntriesStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testModifyAclEntriesPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
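Every ACL test in this file makes the same substitution: the namespace check moves from fs.getIsNamespaceEnabled() to a getIsNamespaceEnabled(fs) helper on the test base class, because the instance method now requires a TracingContext (the ITestCustomerProvidedKey hunks below call fs.getIsNamespaceEnabled(tracingContext) directly). A plausible shape for that helper, assuming it simply threads a test context:

    // Assumed sketch; the real helper lives in the shared test base class.
    protected boolean getIsNamespaceEnabled(AzureBlobFileSystem fs)
        throws IOException {
      // Build a throwaway tracing context so each test doesn't have to.
      return fs.getIsNamespaceEnabled(getTestTracingContext(fs, false));
    }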
List aclSpec = Lists.newArrayList( @@ -269,7 +272,7 @@ public void testModifyAclEntriesPathNotFound() throws Exception { @Test (expected=Exception.class) public void testModifyAclEntriesDefaultOnFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -281,7 +284,7 @@ public void testModifyAclEntriesDefaultOnFile() throws Exception { @Test public void testModifyAclEntriesWithDefaultMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -305,7 +308,7 @@ public void testModifyAclEntriesWithDefaultMask() throws Exception { @Test public void testModifyAclEntriesWithAccessMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -326,7 +329,7 @@ public void testModifyAclEntriesWithAccessMask() throws Exception { @Test(expected=PathIOException.class) public void testModifyAclEntriesWithDuplicateEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -342,7 +345,7 @@ public void testModifyAclEntriesWithDuplicateEntries() throws Exception { @Test public void testRemoveAclEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -370,7 +373,7 @@ public void testRemoveAclEntries() throws Exception { @Test public void testRemoveAclEntriesOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -395,7 +398,7 @@ public void testRemoveAclEntriesOnlyAccess() throws Exception { @Test public void testRemoveAclEntriesOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -422,7 +425,7 @@ public void testRemoveAclEntriesOnlyDefault() throws Exception { @Test public void testRemoveAclEntriesMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + 
assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RWX_RW)); @@ -445,7 +448,7 @@ public void testRemoveAclEntriesMinimal() throws Exception { @Test public void testRemoveAclEntriesMinimalDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -473,7 +476,7 @@ public void testRemoveAclEntriesMinimalDefault() throws Exception { @Test public void testRemoveAclEntriesStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -501,7 +504,7 @@ public void testRemoveAclEntriesStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testRemoveAclEntriesPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. List aclSpec = Lists.newArrayList( @@ -512,7 +515,7 @@ public void testRemoveAclEntriesPathNotFound() throws Exception { @Test(expected=PathIOException.class) public void testRemoveAclEntriesAccessMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -526,7 +529,7 @@ public void testRemoveAclEntriesAccessMask() throws Exception { @Test(expected=PathIOException.class) public void testRemoveAclEntriesDefaultMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -540,7 +543,7 @@ public void testRemoveAclEntriesDefaultMask() throws Exception { @Test(expected=PathIOException.class) public void testRemoveAclEntriesWithDuplicateEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -556,7 +559,7 @@ public void testRemoveAclEntriesWithDuplicateEntries() throws Exception { @Test public void testRemoveDefaultAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -578,7 +581,7 @@ public void testRemoveDefaultAcl() 
throws Exception { @Test public void testRemoveDefaultAclOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -600,7 +603,7 @@ public void testRemoveDefaultAclOnlyAccess() throws Exception { @Test public void testRemoveDefaultAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -616,7 +619,7 @@ public void testRemoveDefaultAclOnlyDefault() throws Exception { @Test public void testRemoveDefaultAclMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); fs.removeDefaultAcl(path); @@ -629,7 +632,7 @@ public void testRemoveDefaultAclMinimal() throws Exception { @Test public void testRemoveDefaultAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -651,7 +654,7 @@ public void testRemoveDefaultAclStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testRemoveDefaultAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
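Besides the helper substitution, a few ACL tests further down in this file (testDefaultAclRenamedFile, testEnsureAclOperationWorksForRoot) also register a TracingHeaderValidator listener so the x-ms-client-request-id header of each request can be checked against the expected operation. The pattern, condensed from those hunks (the trailing boolean and integer arguments appear to describe continuation behaviour and an expected retry count, but that is inferred, not confirmed here):

    // Validate tracing headers for the next operation, then deregister.
    fs.registerListener(new TracingHeaderValidator(
        fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(),
        fs.getFileSystemId(), FSOperationType.RENAME, true, 0));
    fs.rename(filePath, renamedFilePath);
    fs.registerListener(null); // stop validating subsequent operations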
fs.removeDefaultAcl(path); @@ -660,7 +663,7 @@ public void testRemoveDefaultAclPathNotFound() throws Exception { @Test public void testRemoveAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -682,7 +685,7 @@ public void testRemoveAcl() throws Exception { @Test public void testRemoveAclMinimalAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -696,7 +699,7 @@ public void testRemoveAclMinimalAcl() throws Exception { @Test public void testRemoveAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -716,7 +719,7 @@ public void testRemoveAclStickyBit() throws Exception { @Test public void testRemoveAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -735,7 +738,7 @@ public void testRemoveAclOnlyDefault() throws Exception { @Test(expected=FileNotFoundException.class) public void testRemoveAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
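Related to test diagnostics, the patch also swaps bare assertTrue(fs.exists(path)) checks for the ContractTestUtils assertions (see testPosixRenameDirectory above and testSetOwnerForNonNamespaceEnabledAccount below), which carry the failing path and extra context in the assertion message rather than a bare AssertionError:

    import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
    import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;

    // On failure these name the missing/unexpected path in the message.
    assertPathExists(fs, "This path should exist", testDir2);
    assertPathDoesNotExist(fs, "This path should not exist",
        new Path(testDir2 + "/test1/test2/test3"));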
fs.removeAcl(path); @@ -744,7 +747,7 @@ public void testRemoveAclPathNotFound() throws Exception { @Test public void testSetAcl() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -770,7 +773,7 @@ public void testSetAcl() throws Exception { @Test public void testSetAclOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -791,7 +794,7 @@ public void testSetAclOnlyAccess() throws Exception { @Test public void testSetAclOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -811,7 +814,7 @@ public void testSetAclOnlyDefault() throws Exception { @Test public void testSetAclMinimal() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R_R)); @@ -835,7 +838,7 @@ public void testSetAclMinimal() throws Exception { @Test public void testSetAclMinimalDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -855,7 +858,7 @@ public void testSetAclMinimalDefault() throws Exception { @Test public void testSetAclCustomMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -877,7 +880,7 @@ public void testSetAclCustomMask() throws Exception { @Test public void testSetAclStickyBit() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750)); List aclSpec = Lists.newArrayList( @@ -903,7 +906,7 @@ public void testSetAclStickyBit() throws Exception { @Test(expected=FileNotFoundException.class) public void testSetAclPathNotFound() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); // Path has not been created. 
List aclSpec = Lists.newArrayList( @@ -917,7 +920,7 @@ public void testSetAclPathNotFound() throws Exception { @Test(expected=Exception.class) public void testSetAclDefaultOnFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -929,7 +932,7 @@ public void testSetAclDefaultOnFile() throws Exception { @Test public void testSetAclDoesNotChangeDefaultMask() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -953,7 +956,7 @@ public void testSetAclDoesNotChangeDefaultMask() throws Exception { @Test(expected=PathIOException.class) public void testSetAclWithDuplicateEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -965,7 +968,7 @@ public void testSetAclWithDuplicateEntries() throws Exception { @Test public void testSetPermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -992,7 +995,7 @@ public void testSetPermission() throws Exception { @Test public void testSetPermissionOnlyAccess() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); fs.create(path).close(); fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); @@ -1014,7 +1017,7 @@ public void testSetPermissionOnlyAccess() throws Exception { @Test public void testSetPermissionOnlyDefault() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1038,7 +1041,7 @@ public void testSetPermissionOnlyDefault() throws Exception { @Test public void testDefaultAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1058,7 +1061,7 @@ public void testDefaultAclNewFile() throws Exception { @Ignore // wait umask fix to be deployed public void testOnlyAccessAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, 
UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1075,7 +1078,7 @@ public void testOnlyAccessAclNewFile() throws Exception { @Test public void testDefaultMinimalAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1094,7 +1097,7 @@ public void testDefaultMinimalAclNewFile() throws Exception { @Test public void testDefaultAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1120,7 +1123,7 @@ public void testDefaultAclNewDir() throws Exception { @Test public void testOnlyAccessAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1137,7 +1140,7 @@ public void testOnlyAccessAclNewDir() throws Exception { @Test public void testDefaultMinimalAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1159,7 +1162,7 @@ public void testDefaultMinimalAclNewDir() throws Exception { @Test public void testDefaultAclNewFileWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1181,7 +1184,7 @@ public void testDefaultAclNewFileWithMode() throws Exception { @Test public void testDefaultAclNewDirWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1205,7 +1208,7 @@ public void testDefaultAclNewDirWithMode() throws Exception { @Test public void testDefaultAclRenamedFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); Path dirPath = new Path(path, "dir"); FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) RWX_RX)); @@ -1216,7 +1219,12 @@ public void testDefaultAclRenamedFile() throws Exception { fs.create(filePath).close(); fs.setPermission(filePath, FsPermission.createImmutable((short) RW_R)); Path renamedFilePath = new Path(dirPath, "file1"); + + 
fs.registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.RENAME, true, 0)); fs.rename(filePath, renamedFilePath); + fs.registerListener(null); AclEntry[] expected = new AclEntry[] { }; AclStatus s = fs.getAclStatus(renamedFilePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); @@ -1227,7 +1235,7 @@ public void testDefaultAclRenamedFile() throws Exception { @Test public void testDefaultAclRenamedDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); Path dirPath = new Path(path, "dir"); FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) RWX_RX)); @@ -1248,39 +1256,58 @@ public void testDefaultAclRenamedDir() throws Exception { @Test public void testEnsureAclOperationWorksForRoot() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(fs.getIsNamespaceEnabled()); + assumeTrue(getIsNamespaceEnabled(fs)); Path rootPath = new Path("/"); List aclSpec1 = Lists.newArrayList( aclEntry(DEFAULT, GROUP, FOO, ALL), aclEntry(ACCESS, GROUP, BAR, ALL)); + + fs.registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), + fs.getFileSystemId(), FSOperationType.SET_ACL, true, 0)); fs.setAcl(rootPath, aclSpec1); + + fs.setListenerOperation(FSOperationType.GET_ACL_STATUS); fs.getAclStatus(rootPath); + fs.setListenerOperation(FSOperationType.SET_OWNER); fs.setOwner(rootPath, TEST_OWNER, TEST_GROUP); + fs.setListenerOperation(FSOperationType.SET_PERMISSION); fs.setPermission(rootPath, new FsPermission("777")); List aclSpec2 = Lists.newArrayList( aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(ACCESS, USER, BAR, ALL)); + fs.setListenerOperation(FSOperationType.MODIFY_ACL); fs.modifyAclEntries(rootPath, aclSpec2); + fs.setListenerOperation(FSOperationType.REMOVE_ACL_ENTRIES); fs.removeAclEntries(rootPath, aclSpec2); + fs.setListenerOperation(FSOperationType.REMOVE_DEFAULT_ACL); fs.removeDefaultAcl(rootPath); + fs.setListenerOperation(FSOperationType.REMOVE_ACL); fs.removeAcl(rootPath); } @Test public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + AbfsConfiguration conf = fs.getAbfsStore().getAbfsConfiguration(); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); + TracingHeaderValidator tracingHeaderValidator = new TracingHeaderValidator( + conf.getClientCorrelationId(), fs.getFileSystemId(), + FSOperationType.GET_FILESTATUS, false, 0); + fs.registerListener(tracingHeaderValidator); FileStatus oldFileStatus = fs.getFileStatus(filePath); + tracingHeaderValidator.setOperation(FSOperationType.SET_OWNER); fs.setOwner(filePath, TEST_OWNER, TEST_GROUP); + fs.registerListener(null); FileStatus newFileStatus = fs.getFileStatus(filePath); assertEquals(oldFileStatus.getOwner(), newFileStatus.getOwner()); @@ -1290,11 +1317,11 @@ public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - 
Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); FsPermission oldPermission = fs.getFileStatus(filePath).getPermission(); // default permission for non-namespace enabled account is "777" FsPermission newPermission = new FsPermission("557"); @@ -1309,7 +1336,7 @@ public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { @Test public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1326,7 +1353,7 @@ public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1343,7 +1370,7 @@ public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Ex @Test public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1357,7 +1384,7 @@ public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1371,7 +1398,7 @@ public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1388,7 +1415,7 @@ public void testSetAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testGetAclStatusForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!fs.getIsNamespaceEnabled()); + Assume.assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 9229905b4623c..0873b8e24b5af 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -33,7 +33,9 @@ import java.util.Map; import 
java.util.Optional; import java.util.Random; +import java.util.UUID; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @@ -106,17 +108,19 @@ public ITestCustomerProvidedKey() throws Exception { @Test public void testReadWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = "/" + methodName.getMethodName(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); int length = FILE_SIZE; byte[] buffer = new byte[length]; - final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false); + TracingContext tracingContext = getTestTracingContext(fs, false); + final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false, + tracingContext); final String eTag = op.getResult() .getResponseHeader(HttpHeaderConfigurations.ETAG); AbfsRestOperation abfsRestOperation = abfsClient - .read(fileName, 0, buffer, 0, length, eTag, null); + .read(fileName, 0, buffer, 0, length, eTag, null, tracingContext); assertCPKHeaders(abfsRestOperation, true); assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -154,17 +158,19 @@ public void testReadWithCPK() throws Exception { @Test public void testReadWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - String fileName = "/" + methodName.getMethodName(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); int length = INT_512; byte[] buffer = new byte[length * 4]; - final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false); + TracingContext tracingContext = getTestTracingContext(fs, false); + final AbfsRestOperation op = abfsClient + .getPathStatus(fileName, false, tracingContext); final String eTag = op.getResult() .getResponseHeader(HttpHeaderConfigurations.ETAG); AbfsRestOperation abfsRestOperation = abfsClient - .read(fileName, 0, buffer, 0, length, eTag, null); + .read(fileName, 0, buffer, 0, length, eTag, null, tracingContext); assertCPKHeaders(abfsRestOperation, false); assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -182,7 +188,8 @@ public void testReadWithoutCPK() throws Exception { try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); AbfsClient abfsClient2 = fs2.getAbfsClient()) { LambdaTestUtils.intercept(IOException.class, () -> { - abfsClient2.read(fileName, 0, buffer, 0, length, eTag, null); + abfsClient2.read(fileName, 0, buffer, 0, length, eTag, null, + getTestTracingContext(fs, false)); }); } } @@ -190,7 +197,7 @@ public void testReadWithoutCPK() throws Exception { @Test public void testAppendWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append with correct CPK headers @@ -200,7 +207,7 @@ public void testAppendWithCPK() throws Exception { byte[] buffer = getRandomBytesArray(5); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient - .append(fileName, buffer, 
appendRequestParameters, null); + .append(fileName, buffer, appendRequestParameters, null, getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, true); assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -216,7 +223,8 @@ public void testAppendWithCPK() throws Exception { try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); AbfsClient abfsClient2 = fs2.getAbfsClient()) { LambdaTestUtils.intercept(IOException.class, () -> { - abfsClient2.append(fileName, buffer, appendRequestParameters, null); + abfsClient2.append(fileName, buffer, appendRequestParameters, null, + getTestTracingContext(fs, false)); }); } @@ -225,7 +233,8 @@ public void testAppendWithCPK() throws Exception { try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) { LambdaTestUtils.intercept(IOException.class, () -> { - abfsClient3.append(fileName, buffer, appendRequestParameters, null); + abfsClient3.append(fileName, buffer, appendRequestParameters, null, + getTestTracingContext(fs, false)); }); } } @@ -233,7 +242,7 @@ public void testAppendWithCPK() throws Exception { @Test public void testAppendWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append without CPK headers @@ -243,7 +252,8 @@ public void testAppendWithoutCPK() throws Exception { byte[] buffer = getRandomBytesArray(5); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient - .append(fileName, buffer, appendRequestParameters, null); + .append(fileName, buffer, appendRequestParameters, null, + getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, false); assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256, ""); @@ -259,7 +269,8 @@ public void testAppendWithoutCPK() throws Exception { try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); AbfsClient abfsClient2 = fs2.getAbfsClient()) { LambdaTestUtils.intercept(IOException.class, () -> { - abfsClient2.append(fileName, buffer, appendRequestParameters, null); + abfsClient2.append(fileName, buffer, appendRequestParameters, null, + getTestTracingContext(fs, false)); }); } } @@ -267,7 +278,7 @@ public void testAppendWithoutCPK() throws Exception { @Test public void testSetGetXAttr() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = methodName.getMethodName(); + final String fileName = path(methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); String valSent = "testValue"; @@ -315,7 +326,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs1 = getAbfs(true); int fileSize = FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS; byte[] fileContent = getRandomBytesArray(fileSize); - Path testFilePath = createFileWithContent(fs1, "fs1-file.txt", fileContent); + Path testFilePath = createFileWithContent(fs1, + String.format("fs1-file%s.txt", UUID.randomUUID()), fileContent); // Create fs2 with different CPK Configuration conf = new Configuration(); @@ -330,7 +342,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); // Read from fs1 and write to fs2, fs1 and fs2 are having 
different CPK - Path fs2DestFilePath = new Path("fs2-dest-file.txt"); + Path fs2DestFilePath = new Path( + String.format("fs2-dest-file%s.txt", UUID.randomUUID())); FSDataOutputStream ops = fs2.create(fs2DestFilePath); try (FSDataInputStream iStream = fs1.open(testFilePath)) { long totalBytesRead = 0; @@ -398,14 +411,15 @@ public void testListPathWithoutCPK() throws Exception { private void testListPath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - String testDirName = "/" + methodName.getMethodName(); - final Path testPath = new Path(testDirName); + final Path testPath = path("/" + methodName.getMethodName()); + String testDirName = testPath.toString(); fs.mkdirs(testPath); createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE); createFileAndGetContent(fs, testDirName + "/bbb", FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient - .listPath(testDirName, false, INT_50, null); + .listPath(testDirName, false, INT_50, null, + getTestTracingContext(fs, false)); assertListstatus(fs, abfsRestOperation, testPath); // Trying with different CPK headers @@ -415,7 +429,9 @@ private void testListPath(final boolean isWithCPK) throws Exception { "different-1234567890123456789012"); AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); AbfsClient abfsClient2 = fs2.getAbfsClient(); - abfsRestOperation = abfsClient2.listPath(testDirName, false, INT_50, null); + TracingContext tracingContext = getTestTracingContext(fs, false); + abfsRestOperation = abfsClient2.listPath(testDirName, false, INT_50, + null, tracingContext); assertListstatus(fs, abfsRestOperation, testPath); if (isWithCPK) { @@ -424,7 +440,7 @@ private void testListPath(final boolean isWithCPK) throws Exception { AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem.get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient(); abfsRestOperation = abfsClient3 - .listPath(testDirName, false, INT_50, null); + .listPath(testDirName, false, INT_50, null, tracingContext); assertListstatus(fs, abfsRestOperation, testPath); } } @@ -455,7 +471,8 @@ public void testCreatePathWithoutCPK() throws Exception { private void testCreatePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -463,11 +480,13 @@ private void testCreatePath(final boolean isWithCPK) throws Exception { FsAction.EXECUTE, FsAction.EXECUTE); FsPermission umask = new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE); - boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(); + TracingContext tracingContext = getTestTracingContext(fs, false); + boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(tracingContext); AbfsRestOperation abfsRestOperation = abfsClient .createPath(testFileName, true, true, isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null, false, null); + isNamespaceEnabled ? 
getOctalNotation(umask) : null, false, null,
+            tracingContext);
     assertCPKHeaders(abfsRestOperation, isWithCPK);
     assertResponseHeader(abfsRestOperation, isWithCPK,
         X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs));
@@ -496,7 +515,8 @@ public void testRenamePathWithoutCPK() throws Exception {
   private void testRenamePath(final boolean isWithCPK) throws Exception {
     final AzureBlobFileSystem fs = getAbfs(isWithCPK);
-    final String testFileName = "/" + methodName.getMethodName();
+    final String testFileName = path("/" + methodName.getMethodName())
+        .toString();
     createFileAndGetContent(fs, testFileName, FILE_SIZE);
     FileStatus fileStatusBeforeRename = fs
@@ -505,7 +525,8 @@ private void testRenamePath(final boolean isWithCPK) throws Exception {
     String newName = "/newName";
     AbfsClient abfsClient = fs.getAbfsClient();
     AbfsRestOperation abfsRestOperation = abfsClient
-        .renamePath(testFileName, newName, null);
+        .renamePath(testFileName, newName, null,
+        getTestTracingContext(fs, false));
     assertCPKHeaders(abfsRestOperation, false);
     assertNoCPKResponseHeadersPresent(abfsRestOperation);
@@ -530,15 +551,17 @@ public void testFlushWithoutCPK() throws Exception {
   private void testFlush(final boolean isWithCPK) throws Exception {
     final AzureBlobFileSystem fs = getAbfs(isWithCPK);
-    final String testFileName = "/" + methodName.getMethodName();
-    fs.create(new Path(testFileName));
+    final String testFileName = path("/" + methodName.getMethodName())
+        .toString();
+    fs.create(new Path(testFileName)).close();
     AbfsClient abfsClient = fs.getAbfsClient();
     String expectedCPKSha = getCPKSha(fs);
     byte[] fileContent = getRandomBytesArray(FILE_SIZE);
     Path testFilePath = new Path(testFileName + "1");
-    FSDataOutputStream oStream = fs.create(testFilePath);
-    oStream.write(fileContent);
+    try (FSDataOutputStream oStream = fs.create(testFilePath)) {
+      oStream.write(fileContent);
+    }
     // Trying to read with different CPK headers
     Configuration conf = fs.getConf();
@@ -548,7 +571,8 @@ private void testFlush(final boolean isWithCPK) throws Exception {
     try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
         AbfsClient abfsClient2 = fs2.getAbfsClient()) {
       LambdaTestUtils.intercept(IOException.class, () -> {
-        abfsClient2.flush(testFileName, 0, false, false, null, null);
+        abfsClient2.flush(testFileName, 0, false, false, null, null,
+            getTestTracingContext(fs, false));
       });
     }
@@ -558,14 +582,16 @@ private void testFlush(final boolean isWithCPK) throws Exception {
     try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem
         .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) {
       LambdaTestUtils.intercept(IOException.class, () -> {
-        abfsClient3.flush(testFileName, 0, false, false, null, null);
+        abfsClient3.flush(testFileName, 0, false, false, null, null,
+            getTestTracingContext(fs, false));
       });
     }
   }
     // With correct CPK
     AbfsRestOperation abfsRestOperation = abfsClient
-        .flush(testFileName, 0, false, false, null, null);
+        .flush(testFileName, 0, false, false, null, null,
+        getTestTracingContext(fs, false));
     assertCPKHeaders(abfsRestOperation, isWithCPK);
     assertResponseHeader(abfsRestOperation, isWithCPK,
         X_MS_ENCRYPTION_KEY_SHA256, expectedCPKSha);
@@ -586,7 +612,8 @@ public void testSetPathPropertiesWithoutCPK() throws Exception {
   private void testSetPathProperties(final boolean isWithCPK) throws Exception {
     final AzureBlobFileSystem fs = getAbfs(isWithCPK);
-    final String testFileName = "/" + methodName.getMethodName();
+    final String testFileName = path("/" + methodName.getMethodName())
+        .toString();
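One pattern recurs through these CPK hunks: when a test issues several client calls for the same logical operation, the patch builds a single TracingContext up front and reuses it, rather than constructing one per call. Condensed from testReadWithCPK above, with the same helpers assumed:

    // One context per logical test operation, shared across client calls so
    // the status probe and the read correlate as a single operation.
    TracingContext tracingContext = getTestTracingContext(fs, false);
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation op =
        abfsClient.getPathStatus(fileName, false, tracingContext);
    String eTag = op.getResult()
        .getResponseHeader(HttpHeaderConfigurations.ETAG);
    abfsClient.read(fileName, 0, buffer, 0, buffer.length, eTag, null,
        tracingContext);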
createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -594,7 +621,8 @@ private void testSetPathProperties(final boolean isWithCPK) throws Exception { properties.put("key", "val"); AbfsRestOperation abfsRestOperation = abfsClient .setPathProperties(testFileName, - convertXmsPropertiesToCommaSeparatedString(properties)); + convertXmsPropertiesToCommaSeparatedString(properties), + getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, isWithCPK); assertResponseHeader(abfsRestOperation, isWithCPK, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -615,12 +643,14 @@ public void testGetPathStatusFileWithoutCPK() throws Exception { private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); + TracingContext tracingContext = getTestTracingContext(fs, false); AbfsRestOperation abfsRestOperation = abfsClient - .getPathStatus(testFileName, false); + .getPathStatus(testFileName, false, tracingContext); assertCPKHeaders(abfsRestOperation, false); assertResponseHeader(abfsRestOperation, isWithCPK, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -629,7 +659,7 @@ private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { assertResponseHeader(abfsRestOperation, false, X_MS_REQUEST_SERVER_ENCRYPTED, ""); - abfsRestOperation = abfsClient.getPathStatus(testFileName, true); + abfsRestOperation = abfsClient.getPathStatus(testFileName, true, tracingContext); assertCPKHeaders(abfsRestOperation, isWithCPK); assertResponseHeader(abfsRestOperation, isWithCPK, X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); @@ -651,7 +681,8 @@ public void testDeletePathWithoutCPK() throws Exception { private void testDeletePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); FileStatus[] listStatuses = fs.listStatus(new Path(testFileName)); @@ -660,7 +691,8 @@ private void testDeletePath(final boolean isWithCPK) throws Exception { AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient - .deletePath(testFileName, false, null); + .deletePath(testFileName, false, null, + getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, false); assertNoCPKResponseHeadersPresent(abfsRestOperation); @@ -680,14 +712,16 @@ public void testSetPermissionWithoutCPK() throws Exception { private void testSetPermission(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + Assume.assumeTrue(fs.getIsNamespaceEnabled(getTestTracingContext(fs, false))); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); FsPermission permission = new FsPermission(FsAction.EXECUTE, FsAction.EXECUTE, FsAction.EXECUTE); AbfsRestOperation abfsRestOperation = abfsClient - .setPermission(testFileName, 
permission.toString()); + .setPermission(testFileName, permission.toString(), + getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, false); assertNoCPKResponseHeadersPresent(abfsRestOperation); } @@ -704,8 +738,10 @@ public void testSetAclWithoutCPK() throws Exception { private void testSetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + TracingContext tracingContext = getTestTracingContext(fs, false); + Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -714,7 +750,8 @@ private void testSetAcl(final boolean isWithCPK) throws Exception { .deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); AbfsRestOperation abfsRestOperation = abfsClient - .setAcl(testFileName, AbfsAclHelper.serializeAclSpec(aclEntries)); + .setAcl(testFileName, AbfsAclHelper.serializeAclSpec(aclEntries), + tracingContext); assertCPKHeaders(abfsRestOperation, false); assertNoCPKResponseHeadersPresent(abfsRestOperation); } @@ -731,11 +768,14 @@ public void testGetAclWithoutCPK() throws Exception { private void testGetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - Assume.assumeTrue(fs.getIsNamespaceEnabled()); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + TracingContext tracingContext = getTestTracingContext(fs, false); + Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); - AbfsRestOperation abfsRestOperation = abfsClient.getAclStatus(testFileName); + AbfsRestOperation abfsRestOperation = + abfsClient.getAclStatus(testFileName, tracingContext); assertCPKHeaders(abfsRestOperation, false); assertNoCPKResponseHeadersPresent(abfsRestOperation); } @@ -759,11 +799,12 @@ private void testCheckAccess(final boolean isWithCPK) throws Exception { getAuthType() == AuthType.OAuth); final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - fs.create(new Path(testFileName)); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + fs.create(new Path(testFileName)).close(); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient - .checkAccess(testFileName, "rwx"); + .checkAccess(testFileName, "rwx", getTestTracingContext(fs, false)); assertCPKHeaders(abfsRestOperation, false); assertNoCPKResponseHeadersPresent(abfsRestOperation); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java index ba9b639adb602..0ccef2e6ccb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java @@ -26,22 +26,29 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import 
org.apache.hadoop.fs.azurebfs.utils.TracingContext; /** * Test FileSystemProperties. */ public class ITestFileSystemProperties extends AbstractAbfsIntegrationTest { private static final int TEST_DATA = 100; - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; public ITestFileSystemProperties() throws Exception { } @Test public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - testWriteOneByteToFileAndEnsureThreadPoolCleanup(); + Path testPath = path(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { + stream.write(TEST_DATA); + } + + FileStatus fileStatus = fs.getFileStatus(testPath); + assertEquals(1, fileStatus.getLen()); - try(FSDataInputStream inputStream = fs.open(TEST_PATH, 4 * 1024 * 1024)) { + try(FSDataInputStream inputStream = fs.open(testPath, 4 * 1024 * 1024)) { int i = inputStream.read(); assertEquals(TEST_DATA, i); } @@ -50,11 +57,12 @@ public void testWriteOneByteToFileAndEnsureThreadPoolCleanup() throws Exceptio @Test public void testWriteOneByteToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = path(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { stream.write(TEST_DATA); } - FileStatus fileStatus = fs.getFileStatus(TEST_PATH); + FileStatus fileStatus = fs.getFileStatus(testPath); assertEquals(1, fileStatus.getLen()); } @@ -64,8 +72,10 @@ public void testBase64FileSystemProperties() throws Exception { final Hashtable<String, String> properties = new Hashtable<>(); properties.put("key", "{ value: value }"); - fs.getAbfsStore().setFilesystemProperties(properties); - Hashtable<String, String> fetchedProperties = fs.getAbfsStore().getFilesystemProperties(); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setFilesystemProperties(properties, tracingContext); + Hashtable<String, String> fetchedProperties = fs.getAbfsStore() + .getFilesystemProperties(tracingContext); assertEquals(properties, fetchedProperties); } @@ -75,10 +85,12 @@ public void testBase64PathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable<String, String> properties = new Hashtable<>(); properties.put("key", "{ value: valueTest }"); - touch(TEST_PATH); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties); - Hashtable<String, String> fetchedProperties = - fs.getAbfsStore().getPathStatus(TEST_PATH); + Path testPath = path(TEST_PATH); + touch(testPath); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); + Hashtable<String, String> fetchedProperties = fs.getAbfsStore() + .getPathStatus(testPath, tracingContext); assertEquals(properties, fetchedProperties); } @@ -88,8 +100,10 @@ public void testBase64InvalidFileSystemProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable<String, String> properties = new Hashtable<>(); properties.put("key", "{ value: value歲 }"); - fs.getAbfsStore().setFilesystemProperties(properties); - Hashtable<String, String> fetchedProperties = fs.getAbfsStore().getFilesystemProperties(); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setFilesystemProperties(properties, tracingContext); + Hashtable<String, String> fetchedProperties = fs.getAbfsStore() + .getFilesystemProperties(tracingContext); assertEquals(properties, fetchedProperties); }
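
The property tests above thread a TracingContext through every AzureBlobFileSystemStore call via a getTestTracingContext(fs, ...) helper. The helper itself is not shown in any hunk of this patch; the sketch below is one plausible shape for it, inferred from the constructor arguments TestTracingContext passes explicitly later in the patch. The TEST_OP operation type, the fallback literals for a null filesystem, and the assumption that the boolean maps to the needs-primary-request-id flag are all mine, not quoted from the patch.

```java
// Hedged sketch, assumed to live in AbstractAbfsIntegrationTest.
// Uses only accessors visible elsewhere in this diff:
// getAbfsStore(), getAbfsConfiguration(), getClientCorrelationId(),
// getFileSystemId(), getTracingHeaderFormat().
public TracingContext getTestTracingContext(AzureBlobFileSystem fs,
    boolean needsPrimaryRequestId) {
  String correlationId;
  String fsId;
  TracingHeaderFormat format;
  if (fs == null) {
    // Unit tests (e.g. TestAbfsInputStream) call this with a null fs.
    correlationId = "test-corr-id";          // assumed fallback
    fsId = "test-filesystem-id";             // assumed fallback
    format = TracingHeaderFormat.ALL_ID_FORMAT;
  } else {
    AbfsConfiguration abfsConf = fs.getAbfsStore().getAbfsConfiguration();
    correlationId = abfsConf.getClientCorrelationId();
    fsId = fs.getFileSystemId();
    format = abfsConf.getTracingHeaderFormat();
  }
  return new TracingContext(correlationId, fsId, FSOperationType.TEST_OP,
      needsPrimaryRequestId, format, null /* no Listener attached */);
}
```
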
@@ -99,9 +113,12 @@ public void testBase64InvalidPathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable<String, String> properties = new Hashtable<>(); properties.put("key", "{ value: valueTest兩 }"); - touch(TEST_PATH); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties); - Hashtable<String, String> fetchedProperties = fs.getAbfsStore().getPathStatus(TEST_PATH); + Path testPath = path(TEST_PATH); + touch(testPath); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); + Hashtable<String, String> fetchedProperties = fs.getAbfsStore() + .getPathStatus(testPath, tracingContext); assertEquals(properties, fetchedProperties); } @@ -111,8 +128,10 @@ public void testSetFileSystemProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable<String, String> properties = new Hashtable<>(); properties.put("containerForDevTest", "true"); - fs.getAbfsStore().setFilesystemProperties(properties); - Hashtable<String, String> fetchedProperties = fs.getAbfsStore().getFilesystemProperties(); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setFilesystemProperties(properties, tracingContext); + Hashtable<String, String> fetchedProperties = fs.getAbfsStore() + .getFilesystemProperties(tracingContext); assertEquals(properties, fetchedProperties); }
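
A second recurring change in this file is dropping the shared Path constant in favour of a path(...) helper, so each test works under its own location instead of a fixed /testfile. Purely as an illustration of the contract (the real helper lives in the test base class and is not part of this hunk; TEST_ROOT and the concatenation strategy below are assumptions):

```java
// Illustrative only. The property that matters: two tests, or two forked
// test JVMs, never resolve to the same container-relative location.
private static final String TEST_ROOT = "/test-" + java.util.UUID.randomUUID();

protected Path path(String filepath) throws IOException {
  // Call sites in this patch pass the leading slash, e.g. path("/testfile");
  // normalization of relative names like path("testfile") is omitted here.
  return getFileSystem().makeQualified(new Path(TEST_ROOT + filepath));
}
```
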
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java index 29de126c4cc40..01227691c3139 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.UUID; -import org.apache.hadoop.fs.azurebfs.enums.Trilean; import org.junit.Assume; import org.junit.Test; import org.assertj.core.api.Assertions; @@ -32,7 +31,10 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -65,7 +67,7 @@ public void testXNSAccount() throws IOException { Assume.assumeTrue("Skip this test because the account being used for test is a non XNS account", isUsingXNSAccount); assertTrue("Expecting getIsNamespaceEnabled() return true", - getFileSystem().getIsNamespaceEnabled()); + getIsNamespaceEnabled(getFileSystem())); } @Test @@ -73,26 +75,26 @@ public void testNonXNSAccount() throws IOException { Assume.assumeFalse("Skip this test because the account being used for test is a XNS account", isUsingXNSAccount); assertFalse("Expecting getIsNamespaceEnabled() return false", - getFileSystem().getIsNamespaceEnabled()); + getIsNamespaceEnabled(getFileSystem())); } @Test public void testGetIsNamespaceEnabledWhenConfigIsTrue() throws Exception { AzureBlobFileSystem fs = getNewFSWithHnsConf(TRUE_STR); - Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + Assertions.assertThat(getIsNamespaceEnabled(fs)).describedAs( "getIsNamespaceEnabled should return true when the " + "config is set as true").isTrue(); - fs.getAbfsStore().deleteFilesystem(); + fs.getAbfsStore().deleteFilesystem(getTestTracingContext(fs, false)); unsetAndAssert(); } @Test public void testGetIsNamespaceEnabledWhenConfigIsFalse() throws Exception { AzureBlobFileSystem fs = getNewFSWithHnsConf(FALSE_STR); - Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + Assertions.assertThat(getIsNamespaceEnabled(fs)).describedAs( "getIsNamespaceEnabled should return false when the " + "config is set as false").isFalse(); - fs.getAbfsStore().deleteFilesystem(); + fs.getAbfsStore().deleteFilesystem(getTestTracingContext(fs, false)); unsetAndAssert(); } @@ -101,11 +103,11 @@ private void unsetAndAssert() throws Exception { DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED); boolean expectedValue = this.getConfiguration() .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); - Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + Assertions.assertThat(getIsNamespaceEnabled(fs)).describedAs( "getIsNamespaceEnabled should return the value " + "configured for fs.azure.test.namespace.enabled") .isEqualTo(expectedValue); - fs.getAbfsStore().deleteFilesystem(); + fs.getAbfsStore().deleteFilesystem(getTestTracingContext(fs, false)); } private AzureBlobFileSystem getNewFSWithHnsConf( @@ -178,7 +180,8 @@ private void ensureGetAclCallIsMadeOnceForInvalidConf(String invalidConf) .setNamespaceEnabled(Trilean.getTrilean(invalidConf)); AbfsClient mockClient = callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); - verify(mockClient, times(1)).getAclStatus(anyString()); + verify(mockClient, times(1)) + .getAclStatus(anyString(), any(TracingContext.class)); } private void ensureGetAclCallIsNeverMadeForValidConf(String validConf) @@ -187,14 +190,16 @@ private void ensureGetAclCallIsNeverMadeForValidConf(String validConf) .setNamespaceEnabled(Trilean.getTrilean(validConf)); AbfsClient mockClient = callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); - verify(mockClient, never()).getAclStatus(anyString()); + verify(mockClient, never()) + .getAclStatus(anyString(), any(TracingContext.class)); } private void unsetConfAndEnsureGetAclCallIsMadeOnce() throws IOException { this.getFileSystem().getAbfsStore().setNamespaceEnabled(Trilean.UNKNOWN); AbfsClient mockClient = callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); - verify(mockClient, times(1)).getAclStatus(anyString()); + verify(mockClient, times(1)) + .getAclStatus(anyString(), any(TracingContext.class)); } private AbfsClient callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient() @@ -203,9 +208,9 @@ private AbfsClient callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient() final AzureBlobFileSystemStore abfsStore = abfs.getAbfsStore(); final AbfsClient mockClient = mock(AbfsClient.class); doReturn(mock(AbfsRestOperation.class)).when(mockClient) - .getAclStatus(anyString()); + .getAclStatus(anyString(), any(TracingContext.class)); abfsStore.setClient(mockClient); - abfs.getIsNamespaceEnabled(); + getIsNamespaceEnabled(abfs); return mockClient; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java index ab55ffa3fe3c6..fedddcc4b16fb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java @@ -54,7 +54,8 @@ public void testWithWrongSharedKey() throws Exception { + "Authorization header is formed correctly including the " +
"signature.\", 403", () -> { - abfsClient.getAclStatus("/"); + abfsClient + .getAclStatus("/", getTestTracingContext(getFileSystem(), false)); }); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index 609512d9c0478..0534cdda99fc8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -58,11 +58,12 @@ public void testListFileStatus() throws Exception { AzureBlobFileSystem fs = getFileSystem(); // test only valid for non-namespace enabled account Assume.assumeFalse("Namespace enabled account does not support this test,", - fs.getIsNamespaceEnabled()); + getIsNamespaceEnabled(fs)); NativeAzureFileSystem wasb = getWasbFileSystem(); - Path path1 = new Path("/testfiles/~12/!008/3/abFsTestfile"); + Path testFiles = path("/testfiles"); + Path path1 = new Path(testFiles + "/~12/!008/3/abFsTestfile"); try(FSDataOutputStream abfsStream = fs.create(path1, true)) { abfsStream.write(ABFS_TEST_CONTEXT.getBytes()); abfsStream.flush(); @@ -70,7 +71,7 @@ public void testListFileStatus() throws Exception { } // create file using wasb - Path path2 = new Path("/testfiles/~12/!008/3/nativeFsTestfile"); + Path path2 = new Path(testFiles + "/~12/!008/3/nativeFsTestfile"); LOG.info("{}", wasb.getUri()); try(FSDataOutputStream nativeFsStream = wasb.create(path2, true)) { nativeFsStream.write(WASB_TEST_CONTEXT.getBytes()); @@ -78,8 +79,8 @@ public void testListFileStatus() throws Exception { nativeFsStream.hsync(); } // list file using abfs and wasb - FileStatus[] abfsFileStatus = fs.listStatus(new Path("/testfiles/~12/!008/3/")); - FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path("/testfiles/~12/!008/3/")); + FileStatus[] abfsFileStatus = fs.listStatus(new Path(testFiles + "/~12/!008/3/")); + FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path(testFiles + "/~12/!008/3/")); assertEquals(2, abfsFileStatus.length); assertEquals(2, nativeFsFileStatus.length); @@ -93,12 +94,13 @@ public void testReadFile() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account Assume.assumeFalse("Namespace enabled account does not support this test", - abfs.getIsNamespaceEnabled()); + getIsNamespaceEnabled(abfs)); NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testFile = path("/testReadFile"); for (int i = 0; i< 4; i++) { - Path path = new Path("/testReadFile/~12/!008/testfile" + i); + Path path = new Path(testFile + "/~12/!008/testfile" + i); final FileSystem createFs = createFileWithAbfs[i] ? abfs : wasb; // Write @@ -133,12 +135,13 @@ public void testDir() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account Assume.assumeFalse("Namespace enabled account does not support this test", - abfs.getIsNamespaceEnabled()); + getIsNamespaceEnabled(abfs)); NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testDir = path("/testDir"); for (int i = 0; i < 4; i++) { - Path path = new Path("/testDir/t" + i); + Path path = new Path(testDir + "/t" + i); //create final FileSystem createFs = createDirWithAbfs[i] ? 
abfs : wasb; assertTrue(createFs.mkdirs(path)); @@ -168,15 +171,16 @@ public void testSetWorkingDirectory() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account Assume.assumeFalse("Namespace enabled account does not support this test", - abfs.getIsNamespaceEnabled()); + getIsNamespaceEnabled(abfs)); NativeAzureFileSystem wasb = getWasbFileSystem(); - Path d1d4 = new Path("/d1/d2/d3/d4"); + Path d1 = path("/d1"); + Path d1d4 = new Path(d1 + "/d2/d3/d4"); assertMkdirs(abfs, d1d4); //set working directory to path1 - Path path1 = new Path("/d1/d2"); + Path path1 = new Path(d1 + "/d2"); wasb.setWorkingDirectory(path1); abfs.setWorkingDirectory(path1); assertEquals(path1, wasb.getWorkingDirectory()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java new file mode 100644 index 0000000000000..006004850d0df --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.assertj.core.api.Assertions; +import org.junit.Assume; +import org.junit.AssumptionViolatedException; +import org.junit.Ignore; +import org.junit.Test; + +import org.apache.hadoop.fs.CommonPathCapabilities; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLIENT_CORRELATIONID; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE; + +public class TestTracingContext extends AbstractAbfsIntegrationTest { + private static final String[] CLIENT_CORRELATIONID_LIST = { + "valid-corr-id-123", "inval!d", ""}; + private static final int HTTP_CREATED = 201; + + public TestTracingContext() throws Exception { + super(); + } + + @Test + public void testClientCorrelationId() throws Exception { + checkCorrelationConfigValidation(CLIENT_CORRELATIONID_LIST[0], true); + checkCorrelationConfigValidation(CLIENT_CORRELATIONID_LIST[1], false); + checkCorrelationConfigValidation(CLIENT_CORRELATIONID_LIST[2], false); + }
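
testClientCorrelationId drives three configurations through checkCorrelationConfigValidation (defined below): a conforming ID, an ID with an illegal character, and the empty string. The validation rule those samples imply can be sketched as follows; the 72-character cap and the [a-zA-Z0-9-] alphabet are assumptions consistent with the samples, not values quoted from this patch.

```java
// Hedged sketch of TracingContext.validateClientCorrelationID's contract:
// a non-conforming ID is not a hard error, it is simply excluded from the
// x-ms-client-request-id header.
private static final int MAX_CLIENT_CORRELATION_ID_LENGTH = 72;            // assumed
private static final String CLIENT_CORRELATION_ID_PATTERN = "[a-zA-Z0-9-]*"; // assumed

public static String validateClientCorrelationID(String clientCorrelationID) {
  if (clientCorrelationID.length() > MAX_CLIENT_CORRELATION_ID_LENGTH
      || !clientCorrelationID.matches(CLIENT_CORRELATION_ID_PATTERN)) {
    return EMPTY_STRING; // header carries no correlation ID
  }
  return clientCorrelationID;
}
```

Note that the empty string satisfies the pattern trivially; the third test case still passes because the expected result is the empty string either way.
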
+ + private String getOctalNotation(FsPermission fsPermission) { + Preconditions.checkNotNull(fsPermission, "fsPermission"); + return String + .format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal()); + } + + private String getRelativePath(final Path path) { + Preconditions.checkNotNull(path, "path"); + return path.toUri().getPath(); + } + + public void checkCorrelationConfigValidation(String clientCorrelationId, + boolean includeInHeader) throws Exception { + Configuration conf = getRawConfiguration(); + conf.set(FS_AZURE_CLIENT_CORRELATIONID, clientCorrelationId); + AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(conf); + + String correlationID = fs.getClientCorrelationId(); + if (includeInHeader) { + Assertions.assertThat(correlationID) + .describedAs("Correlation ID should match config when valid") + .isEqualTo(clientCorrelationId); + } else { + Assertions.assertThat(correlationID) + .describedAs("Invalid ID should be replaced with empty string") + .isEqualTo(EMPTY_STRING); + } + TracingContext tracingContext = new TracingContext(clientCorrelationId, + fs.getFileSystemId(), FSOperationType.TEST_OP, + TracingHeaderFormat.ALL_ID_FORMAT, null); + boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(tracingContext); + String path = getRelativePath(new Path("/testDir")); + String permission = isNamespaceEnabled + ? getOctalNotation(FsPermission.getDirDefault()) + : null; + String umask = isNamespaceEnabled + ? getOctalNotation(FsPermission.getUMask(fs.getConf())) + : null; + + //request should not fail for invalid clientCorrelationID + AbfsRestOperation op = fs.getAbfsClient() + .createPath(path, false, true, permission, umask, false, null, + tracingContext); + + int statusCode = op.getResult().getStatusCode(); + Assertions.assertThat(statusCode).describedAs("Request should not fail") + .isEqualTo(HTTP_CREATED); + + String requestHeader = op.getResult().getClientRequestId().replace("[", "") + .replace("]", ""); + Assertions.assertThat(requestHeader) + .describedAs("Client Request Header should match TracingContext") + .isEqualTo(tracingContext.getHeader()); + } + + @Ignore + @Test + //call test methods from the respective test classes + //can be ignored when running all tests as these get covered + public void runCorrelationTestForAllMethods() throws Exception { + Map<AbstractAbfsIntegrationTest, Method> testClasses = new HashMap<>(); + + testClasses.put(new ITestAzureBlobFileSystemListStatus(), //liststatus + ITestAzureBlobFileSystemListStatus.class.getMethod("testListPath")); + testClasses.put(new ITestAbfsReadWriteAndSeek(MIN_BUFFER_SIZE), //open, + // read, write + ITestAbfsReadWriteAndSeek.class.getMethod("testReadAheadRequestID")); + testClasses.put(new ITestAbfsReadWriteAndSeek(MIN_BUFFER_SIZE), //read (bypassreadahead) + ITestAbfsReadWriteAndSeek.class + .getMethod("testReadAndWriteWithDifferentBufferSizesAndSeek")); + testClasses.put(new ITestAzureBlobFileSystemAppend(), //append + ITestAzureBlobFileSystemAppend.class.getMethod("testTracingForAppend")); + testClasses.put(new ITestAzureBlobFileSystemFlush(), + ITestAzureBlobFileSystemFlush.class.getMethod( + "testTracingHeaderForAppendBlob")); //outputstream (appendblob) + testClasses.put(new ITestAzureBlobFileSystemCreate(), + ITestAzureBlobFileSystemCreate.class + .getMethod("testDefaultCreateOverwriteFileTest")); //create + testClasses.put(new ITestAzureBlobFilesystemAcl(), + ITestAzureBlobFilesystemAcl.class + .getMethod("testDefaultAclRenamedFile")); //rename + testClasses.put(new ITestAzureBlobFileSystemDelete(), + ITestAzureBlobFileSystemDelete.class + .getMethod("testDeleteFirstLevelDirectory")); //delete + testClasses.put(new ITestAzureBlobFileSystemCreate(), + ITestAzureBlobFileSystemCreate.class + .getMethod("testCreateNonRecursive")); //mkdirs + testClasses.put(new ITestAzureBlobFileSystemAttributes(), + ITestAzureBlobFileSystemAttributes.class + .getMethod("testSetGetXAttr")); //setxattr, getxattr + testClasses.put(new ITestAzureBlobFilesystemAcl(), + ITestAzureBlobFilesystemAcl.class.getMethod( + "testEnsureAclOperationWorksForRoot")); // setacl, getaclstatus, + // setowner, setpermission, modifyaclentries, + // removeaclentries, removedefaultacl, removeacl + + for (AbstractAbfsIntegrationTest testClass : testClasses.keySet()) { + try { + testClass.setup(); + testClasses.get(testClass).invoke(testClass); + testClass.teardown(); + } catch (InvocationTargetException e) { + if (!(e.getCause() instanceof AssumptionViolatedException)) { + throw new IOException(testClasses.get(testClass).getName() + + " failed tracing context validation test"); + } + } + } + } + + @Test + public void testExternalOps() throws Exception { + //validate tracing header for access, hasPathCapability + AzureBlobFileSystem fs = getFileSystem(); + + fs.registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), + fs.getFileSystemId(),
FSOperationType.HAS_PATH_CAPABILITY, false, + 0)); + + // unset namespaceEnabled to call getAcl -> trigger tracing header validator + fs.getAbfsStore().setNamespaceEnabled(Trilean.UNKNOWN); + fs.hasPathCapability(new Path("/"), CommonPathCapabilities.FS_ACLS); + + Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + Assume.assumeTrue(getConfiguration().isCheckAccessEnabled()); + Assume.assumeTrue(getAuthType() == AuthType.OAuth); + + fs.setListenerOperation(FSOperationType.ACCESS); + fs.getAbfsStore().setNamespaceEnabled(Trilean.TRUE); + fs.access(new Path("/"), FsAction.READ); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java index 299f085ee12de..a5d60c4b41a92 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.junit.Test; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; @@ -118,7 +119,8 @@ private void testExceptionInOptimization(final FileSystem fs, doThrow(new IOException()) .doCallRealMethod() .when(abfsInputStream) - .readRemote(anyLong(), any(), anyInt(), anyInt()); + .readRemote(anyLong(), any(), anyInt(), anyInt(), + any(TracingContext.class)); iStream = new FSDataInputStream(abfsInputStream); verifyBeforeSeek(abfsInputStream); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java index 09a810c5fc9e8..1cb8ed3ea8cee 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static java.lang.Math.max; import static java.lang.Math.min; @@ -270,7 +271,8 @@ private void testPartialReadWithNoData(final FileSystem fs, .getWrappedStream(); abfsInputStream = spy(abfsInputStream); doReturn(10).doReturn(10).doCallRealMethod().when(abfsInputStream) - .readRemote(anyLong(), any(), anyInt(), anyInt()); + .readRemote(anyLong(), any(), anyInt(), anyInt(), + any(TracingContext.class)); iStream = new FSDataInputStream(abfsInputStream); seek(iStream, seekPos); @@ -319,7 +321,8 @@ private void testPartialReadWithSomeDat(final FileSystem fs, - someDataLength; doReturn(10).doReturn(secondReturnSize).doCallRealMethod() .when(abfsInputStream) - .readRemote(anyLong(), any(), anyInt(), anyInt()); + .readRemote(anyLong(), any(), anyInt(), anyInt(), + any(TracingContext.class)); iStream = new FSDataInputStream(abfsInputStream); seek(iStream, seekPos); diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java index ff03c0e78f4a3..d85da5aaeabd7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; @@ -251,7 +252,8 @@ private void partialReadWithNoData(final FileSystem fs, .doReturn(10) .doCallRealMethod() .when(abfsInputStream) - .readRemote(anyLong(), any(), anyInt(), anyInt()); + .readRemote(anyLong(), any(), anyInt(), anyInt(), + any(TracingContext.class)); iStream = new FSDataInputStream(abfsInputStream); seek(iStream, seekPos); @@ -301,7 +303,8 @@ private void partialReadWithSomeData(final FileSystem fs, .doReturn(secondReturnSize) .doCallRealMethod() .when(abfsInputStream) - .readRemote(anyLong(), any(), anyInt(), anyInt()); + .readRemote(anyLong(), any(), anyInt(), anyInt(), + any(TracingContext.class)); iStream = new FSDataInputStream(abfsInputStream); seek(iStream, seekPos); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index fff005114fbe0..431c456ae3daa 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; @@ -32,7 +31,7 @@ * Test create operation. 
*/ public class ITestAbfsOutputStream extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH = new Path("testfile"); + private static final String TEST_FILE_PATH = "testfile"; public ITestAbfsOutputStream() throws Exception { super(); @@ -42,7 +41,7 @@ public ITestAbfsOutputStream() throws Exception { public void testMaxRequestsAndQueueCapacityDefaults() throws Exception { Configuration conf = getRawConfiguration(); final AzureBlobFileSystem fs = getFileSystem(conf); - try (FSDataOutputStream out = fs.create(TEST_FILE_PATH)) { + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); int maxConcurrentRequests @@ -71,19 +70,18 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, "" + maxRequestsToQueue); final AzureBlobFileSystem fs = getFileSystem(conf); - FSDataOutputStream out = fs.create(TEST_FILE_PATH); - AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { + AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); - if (stream.isAppendBlobStream()) { - maxConcurrentRequests = 1; - } + if (stream.isAppendBlobStream()) { + maxConcurrentRequests = 1; + } - Assertions.assertThat(stream.getMaxConcurrentRequestCount()) - .describedAs("maxConcurrentRequests should be " + maxConcurrentRequests) - .isEqualTo(maxConcurrentRequests); - Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()) - .describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) - .isEqualTo(maxRequestsToQueue); + Assertions.assertThat(stream.getMaxConcurrentRequestCount()).describedAs( + "maxConcurrentRequests should be " + maxConcurrentRequests).isEqualTo(maxConcurrentRequests); + Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()).describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) + .isEqualTo(maxRequestsToQueue); + } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index 3da004bafa4df..62326e0dbb353 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TimeoutException; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; import org.apache.hadoop.fs.azurebfs.utils.TestCachedSASToken; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; @@ -93,7 +94,8 @@ private AbfsClient getMockAbfsClient() { return client; } - private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient, String fileName) { + private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient, + String fileName) throws IOException { AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1); // Create AbfsInputStream with the client instance AbfsInputStream inputStream = new AbfsInputStream( @@ -102,7 +104,8 @@ private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient, String fil FORWARD_SLASH + fileName, THREE_KB, 
inputStreamContext.withReadBufferSize(ONE_KB).withReadAheadQueueDepth(10).withReadAheadBlockSize(ONE_KB), - "eTag"); + "eTag", + getTestTracingContext(null, false)); inputStream.setCachedSasToken( TestCachedSASToken.getTestCachedSASTokenInstance()); @@ -117,7 +120,7 @@ public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient, int readAheadQueueDepth, int readBufferSize, boolean alwaysReadBufferSize, - int readAheadBlockSize) { + int readAheadBlockSize) throws IOException { AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1); // Create AbfsInputStream with the client instance AbfsInputStream inputStream = new AbfsInputStream( @@ -129,7 +132,8 @@ public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient, .withReadAheadQueueDepth(readAheadQueueDepth) .withShouldReadBufferSizeAlways(alwaysReadBufferSize) .withReadAheadBlockSize(readAheadBlockSize), - eTag); + eTag, + getTestTracingContext(getFileSystem(), false)); inputStream.setCachedSasToken( TestCachedSASToken.getTestCachedSASTokenInstance()); @@ -140,11 +144,13 @@ public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient, private void queueReadAheads(AbfsInputStream inputStream) { // Mimic AbfsInputStream readAhead queue requests ReadBufferManager.getBufferManager() - .queueReadAhead(inputStream, 0, ONE_KB); + .queueReadAhead(inputStream, 0, ONE_KB, inputStream.getTracingContext()); ReadBufferManager.getBufferManager() - .queueReadAhead(inputStream, ONE_KB, ONE_KB); + .queueReadAhead(inputStream, ONE_KB, ONE_KB, + inputStream.getTracingContext()); ReadBufferManager.getBufferManager() - .queueReadAhead(inputStream, TWO_KB, TWO_KB); + .queueReadAhead(inputStream, TWO_KB, TWO_KB, + inputStream.getTracingContext()); } private void verifyReadCallCount(AbfsClient client, int count) throws @@ -154,7 +160,7 @@ private void verifyReadCallCount(AbfsClient client, int count) throws Thread.sleep(1000); verify(client, times(count)).read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), - any(String.class), any(String.class)); + any(String.class), any(String.class), any(TracingContext.class)); } private void checkEvictedStatus(AbfsInputStream inputStream, int position, boolean expectedToThrowException) @@ -209,7 +215,7 @@ public void testFailedReadAhead() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAhead.txt"); @@ -243,7 +249,7 @@ public void testFailedReadAheadEviction() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAheadEviction.txt"); @@ -258,7 +264,8 @@ public void testFailedReadAheadEviction() throws Exception { // at java.util.Stack.peek(Stack.java:102) // at java.util.Stack.pop(Stack.java:84) // at org.apache.hadoop.fs.azurebfs.services.ReadBufferManager.queueReadAhead - ReadBufferManager.getBufferManager().queueReadAhead(inputStream, 0, ONE_KB); + ReadBufferManager.getBufferManager().queueReadAhead(inputStream, 0, ONE_KB, + getTestTracingContext(getFileSystem(), true)); } /** @@ -287,7 +294,7 @@ public void testOlderReadAheadFailure() throws Exception { .when(client) 
.read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testOlderReadAheadFailure.txt"); @@ -341,7 +348,7 @@ public void testSuccessfulReadAhead() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt"); int beforeReadCompletedListSize = ReadBufferManager.getBufferManager().getCompletedReadListSize(); @@ -399,7 +406,7 @@ public void testReadAheadManagerForFailedReadAhead() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForFailedReadAhead.txt"); @@ -452,7 +459,7 @@ public void testReadAheadManagerForOlderReadAheadFailure() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForOlderReadAheadFailure.txt"); @@ -506,7 +513,7 @@ public void testReadAheadManagerForSuccessfulReadAhead() throws Exception { .when(client) .read(any(String.class), any(Long.class), any(byte[].class), any(Integer.class), any(Integer.class), any(String.class), - any(String.class)); + any(String.class), any(TracingContext.class)); AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt"); @@ -576,13 +583,14 @@ public void testDefaultReadaheadQueueDepth() throws Exception { Configuration config = getRawConfiguration(); config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); AzureBlobFileSystem fs = getFileSystem(config); - Path testFile = new Path("/testFile"); - fs.create(testFile); + Path testFile = path("/testFile"); + fs.create(testFile).close(); FSDataInputStream in = fs.open(testFile); Assertions.assertThat( ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth()) .describedAs("readahead queue depth should be set to default value 2") .isEqualTo(2); + in.close(); } @@ -639,13 +647,12 @@ public AbfsInputStream testReadAheadConfigs(int readRequestSize, readAheadBlockSize = readRequestSize; } - Path testPath = new Path( - "/testReadAheadConfigs"); + Path testPath = path("/testReadAheadConfigs"); final AzureBlobFileSystem fs = createTestFile(testPath, ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, config); byte[] byteBuffer = new byte[ONE_MB]; AbfsInputStream inputStream = this.getAbfsStore(fs) - .openFileForRead(testPath, null); + .openFileForRead(testPath, null, getTestTracingContext(fs, false)); Assertions.assertThat(inputStream.getBufferSize()) .describedAs("Unexpected AbfsInputStream buffer size") diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java index f4243bc7e287b..f01c81b74eeed 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java @@ -26,9 +26,12 @@ import org.mockito.ArgumentCaptor; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isNull; @@ -86,11 +89,17 @@ public void verifyShortWriteRequest() throws Exception { abfsConf = new AbfsConfiguration(conf, accountName1); AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), + any(AppendRequestParameters.class), any(), any(TracingContext.class))) + .thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), + isNull(), any(TracingContext.class))).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), + new TracingContext(abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), + null)); final byte[] b = new byte[WRITE_SIZE]; new Random().nextBytes(b); out.write(b); @@ -110,12 +119,13 @@ public void verifyShortWriteRequest() throws Exception { WRITE_SIZE, 0, 2 * WRITE_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), + any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); } /** @@ -131,13 +141,17 @@ public void verifyWriteRequest() throws Exception { conf.set(accountKey1, accountValue1); abfsConf = new AbfsConfiguration(conf, accountName1); AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + TracingContext tracingContext = new TracingContext("test-corr-id", + "test-fs-id", FSOperationType.WRITE, + TracingHeaderFormat.ALL_ID_FORMAT, null); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(TracingContext.class))).thenReturn(op); + when(client.flush(anyString(), 
anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(TracingContext.class))).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), + tracingContext); final byte[] b = new byte[WRITE_SIZE]; new Random().nextBytes(b); @@ -152,21 +166,26 @@ public void verifyWriteRequest() throws Exception { BUFFER_SIZE, 0, 5*WRITE_SIZE-BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), + any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), + any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), + any(TracingContext.class)); ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class); ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor<TracingContext> acTracingContext = ArgumentCaptor + .forClass(TracingContext.class); ArgumentCaptor<Boolean> acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<Boolean> acFlushClose = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture(), isNull()); + acFlushSASToken.capture(), isNull(), acTracingContext.capture()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(5*WRITE_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues());
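
The verification block above is the template the rest of this file repeats: every mocked append/flush gains a trailing TracingContext argument, and the flush captors grow a TracingContext leg. Reduced to the new part only, with client being the mocked AbfsClient and the acFlush* captors as declared above:

```java
// Plain Mockito idiom, shown in isolation to make the new leg explicit.
ArgumentCaptor<TracingContext> acTracingContext =
    ArgumentCaptor.forClass(TracingContext.class);
verify(client, times(1)).flush(acFlushPath.capture(),
    acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(),
    acFlushClose.capture(), acFlushSASToken.capture(), isNull(),
    acTracingContext.capture());
// These tests stop at capturing; asserting on the captured value, e.g.
// comparing acTracingContext.getValue().getHeader() against the expected
// header as TestTracingContext does, would be the natural next step.
```
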
@@ -187,15 +206,19 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { conf.set(accountKey1, accountValue1); abfsConf = new AbfsConfiguration(conf, accountName1); AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + TracingContext tracingContext = new TracingContext( + abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), null); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(TracingContext.class))).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(TracingContext.class))).thenReturn(op); when(op.getSasToken()).thenReturn("testToken"); when(op.getResult()).thenReturn(httpOp); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), + tracingContext); final byte[] b = new byte[BUFFER_SIZE]; new Random().nextBytes(b); @@ -210,21 +233,23 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class); ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor<TracingContext> acTracingContext = ArgumentCaptor + .forClass(TracingContext.class); ArgumentCaptor<Boolean> acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<Boolean> acFlushClose = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture(), isNull()); + acFlushSASToken.capture(), isNull(), acTracingContext.capture()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); @@ -247,13 +272,19 @@ public void verifyWriteRequestOfBufferSize() throws Exception { AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), + any(AppendRequestParameters.class), any(), any(TracingContext.class))) + .thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), + any(), isNull(), any(TracingContext.class))).thenReturn(op); when(op.getSasToken()).thenReturn("testToken"); when(op.getResult()).thenReturn(httpOp); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), + new TracingContext(abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), + null)); final byte[] b = new byte[BUFFER_SIZE]; new Random().nextBytes(b); @@ -268,12 +299,12 @@ public void verifyWriteRequestOfBufferSize() throws Exception { BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations
in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); } /** @@ -291,11 +322,17 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), + any(AppendRequestParameters.class), any(), any(TracingContext.class))) + .thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), + isNull(), any(TracingContext.class))).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, true)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, true), + new TracingContext(abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.OPEN, abfsConf.getTracingHeaderFormat(), + null)); final byte[] b = new byte[BUFFER_SIZE]; new Random().nextBytes(b); @@ -310,12 +347,12 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, true, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); } /** @@ -332,13 +369,21 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { conf.set(accountKey1, accountValue1); abfsConf = new AbfsConfiguration(conf, accountName1); AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + TracingContext tracingContext = new TracingContext( + abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), null); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), + any(AppendRequestParameters.class), any(), any(TracingContext.class))) + .thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), + isNull(), any(TracingContext.class))).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), new TracingContext(abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.OPEN, abfsConf.getTracingHeaderFormat(), + null)); final byte[] b = new byte[BUFFER_SIZE]; new Random().nextBytes(b); @@ -353,21 +398,23 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { 
BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class); ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor<TracingContext> acTracingContext = ArgumentCaptor + .forClass(TracingContext.class); ArgumentCaptor<Boolean> acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<Boolean> acFlushClose = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture(), isNull()); + acFlushSASToken.capture(), isNull(), acTracingContext.capture()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); @@ -388,11 +435,17 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { abfsConf = new AbfsConfiguration(conf, accountName1); AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); - when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); + when(client.append(anyString(), any(byte[].class), + any(AppendRequestParameters.class), any(), any(TracingContext.class))) + .thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), + isNull(), any(TracingContext.class))).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, - populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false), + new TracingContext(abfsConf.getClientCorrelationId(), "test-fs-id", + FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), + null)); final byte[] b = new byte[BUFFER_SIZE]; new Random().nextBytes(b); @@ -409,11 +462,11 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class)); verify(client, times(1)).append( - eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class)); // confirm there were only 2 invocations in all verify(client, times(2)).append( - eq(PATH), any(byte[].class), any(), any()); + eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class)); } }
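
Every test in TestAbfsOutputStream builds the same five-argument TracingContext inline before constructing the stream. A small factory like the one below would express that shared wiring in one place; it is hypothetical, not something this patch adds, and only restates the arguments the tests supply.

```java
// Hypothetical helper summarizing the TracingContext plumbing repeated
// across the verify* tests above.
private TracingContext newTestTracingContext(AbfsConfiguration abfsConf) {
  return new TracingContext(
      abfsConf.getClientCorrelationId(),   // correlation ID, already validated
      "test-fs-id",                        // stand-in for the filesystem's UUID
      FSOperationType.WRITE,               // operation code carried in the header
      abfsConf.getTracingHeaderFormat(),   // ALL_ID_FORMAT unless overridden
      null);                               // Listener hook, unused outside header tests
}
```
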
+        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
   }
 }
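The recurring edit in the TestAbfsOutputStream hunks above is mechanical: once the mocked AbfsClient methods gain a TracingContext parameter, every Mockito stub and verification must supply one more matcher, or the call no longer fits the new signature. A minimal, self-contained sketch of that rule, under the assumption that the `Client` interface and `Ctx` class below are hypothetical stand-ins rather than ABFS types:

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class MatcherArityExample {
  // Hypothetical stand-in for a tracing parameter threaded through an API.
  static class Ctx { }

  // Hypothetical stand-in for AbfsClient, reduced to one method.
  interface Client {
    String append(String path, byte[] data, Ctx ctx);
  }

  public static void main(String[] args) {
    Client client = mock(Client.class);
    // The stub must provide a matcher for every parameter, including the
    // newly added one. Mixing matchers with a raw value (e.g. a literal
    // null for ctx) would throw InvalidUseOfMatchersException.
    when(client.append(anyString(), any(byte[].class), any(Ctx.class)))
        .thenReturn("ok");

    client.append("/testPath", new byte[8], new Ctx());

    // Verification likewise needs one matcher per parameter.
    verify(client, times(1))
        .append(anyString(), any(byte[].class), any(Ctx.class));
  }
}
```

This is why the hunks widen the matcher lists in the `verify(...)` calls as well as in the `when(...)` stubs, not just where behavior is configured.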
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java
index e10419f148b25..0f8dc55aa14a4 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.fs.azurebfs.services;
 
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_BACKOFF_INTERVAL;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MAX_BACKOFF_INTERVAL;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MAX_IO_RETRIES;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MIN_BACKOFF_INTERVAL;
+
 import java.util.Random;
 
 import org.junit.Assert;
@@ -32,7 +37,6 @@
  * Unit test TestExponentialRetryPolicy.
  */
 public class TestExponentialRetryPolicy extends AbstractAbfsIntegrationTest {
-
   private final int maxRetryCount = 30;
   private final int noRetryCount = 0;
   private final int retryCount = new Random().nextInt(maxRetryCount);
@@ -57,12 +61,38 @@ public void testDifferentMaxIORetryCount() throws Exception {
 
   @Test
   public void testDefaultMaxIORetryCount() throws Exception {
     AbfsConfiguration abfsConfig = getAbfsConfig();
-    Assert.assertTrue(
+    Assert.assertEquals(
         String.format("default maxIORetry count is %s.", maxRetryCount),
-        abfsConfig.getMaxIoRetries() == maxRetryCount);
+        maxRetryCount, abfsConfig.getMaxIoRetries());
     testMaxIOConfig(abfsConfig);
   }
 
+  @Test
+  public void testAbfsConfigConstructor() throws Exception {
+    // Ensure we choose expected values that are not defaults
+    ExponentialRetryPolicy template = new ExponentialRetryPolicy(
+        getAbfsConfig().getMaxIoRetries());
+    int testModifier = 1;
+    int expectedMaxRetries = template.getRetryCount() + testModifier;
+    int expectedMinBackoff = template.getMinBackoff() + testModifier;
+    int expectedMaxBackoff = template.getMaxBackoff() + testModifier;
+    int expectedDeltaBackoff = template.getDeltaBackoff() + testModifier;
+
+    Configuration config = new Configuration(this.getRawConfiguration());
+    config.setInt(AZURE_MAX_IO_RETRIES, expectedMaxRetries);
+    config.setInt(AZURE_MIN_BACKOFF_INTERVAL, expectedMinBackoff);
+    config.setInt(AZURE_MAX_BACKOFF_INTERVAL, expectedMaxBackoff);
+    config.setInt(AZURE_BACKOFF_INTERVAL, expectedDeltaBackoff);
+
+    ExponentialRetryPolicy policy = new ExponentialRetryPolicy(
+        new AbfsConfiguration(config, "dummyAccountName"));
+
+    Assert.assertEquals("Max retry count was not set as expected.", expectedMaxRetries, policy.getRetryCount());
+    Assert.assertEquals("Min backoff interval was not set as expected.", expectedMinBackoff, policy.getMinBackoff());
+    Assert.assertEquals("Max backoff interval was not set as expected.", expectedMaxBackoff, policy.getMaxBackoff());
+    Assert.assertEquals("Delta backoff interval was not set as expected.", expectedDeltaBackoff, policy.getDeltaBackoff());
+  }
+
   private AbfsConfiguration getAbfsConfig() throws Exception {
     Configuration config = new Configuration(this.getRawConfiguration());
@@ -81,8 +111,8 @@ private void testMaxIOConfig(AbfsConfiguration abfsConfig) {
       localRetryCount++;
     }
 
-    Assert.assertTrue(
+    Assert.assertEquals(
         "When all retries are exhausted, the retryCount will be same as max configured",
-        localRetryCount == abfsConfig.getMaxIoRetries());
+        abfsConfig.getMaxIoRetries(), localRetryCount);
   }
 }
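Two details in the retry-policy test changes are worth noting. Converting `assertTrue(a == b)` to `assertEquals(expected, actual)` means a failure reports both values instead of a bare "expected true", and testAbfsConfigConstructor derives its expected values from a template policy plus an offset so they can never coincide with the defaults. A small illustration of the assertion difference, using the same JUnit 4 Assert API the test uses (the demo class name is invented):

```java
import org.junit.Assert;

public class AssertMessageDemo {
  public static void main(String[] args) {
    int expected = 30;
    int actual = 29;
    try {
      // assertTrue reports only the message; the mismatched values are lost.
      Assert.assertTrue("retry count mismatch", actual == expected);
    } catch (AssertionError e) {
      System.out.println(e.getMessage()); // retry count mismatch
    }
    try {
      // assertEquals reports the message plus both values.
      Assert.assertEquals("retry count mismatch", expected, actual);
    } catch (AssertionError e) {
      System.out.println(e.getMessage());
      // retry count mismatch expected:<30> but was:<29>
    }
  }
}
```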
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java
new file mode 100644
index 0000000000000..e195f1c381a94
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.utils;
+
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING;
+
+/**
+ * Used to validate correlation identifiers provided during testing against
+ * values that get associated with a request through its TracingContext instance
+ */
+public class TracingHeaderValidator implements Listener {
+  private String clientCorrelationId;
+  private String fileSystemId;
+  private String primaryRequestId = EMPTY_STRING;
+  private boolean needsPrimaryRequestId;
+  private String streamID = "";
+  private FSOperationType operation;
+  private int retryNum;
+  private TracingHeaderFormat format;
+
+  private static final String GUID_PATTERN = "^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$";
+
+  @Override
+  public void callTracingHeaderValidator(String tracingContextHeader,
+      TracingHeaderFormat format) {
+    this.format = format;
+    validateTracingHeader(tracingContextHeader);
+  }
+
+  @Override
+  public TracingHeaderValidator getClone() {
+    TracingHeaderValidator tracingHeaderValidator = new TracingHeaderValidator(
+        clientCorrelationId, fileSystemId, operation, needsPrimaryRequestId,
+        retryNum, streamID);
+    tracingHeaderValidator.primaryRequestId = primaryRequestId;
+    return tracingHeaderValidator;
+  }
+
+  public TracingHeaderValidator(String clientCorrelationId, String fileSystemId,
+      FSOperationType operation, boolean needsPrimaryRequestId, int retryNum) {
+    this.clientCorrelationId = clientCorrelationId;
+    this.fileSystemId = fileSystemId;
+    this.operation = operation;
+    this.retryNum = retryNum;
+    this.needsPrimaryRequestId = needsPrimaryRequestId;
+  }
+
+  public TracingHeaderValidator(String clientCorrelationId, String fileSystemId,
+      FSOperationType operation, boolean needsPrimaryRequestId, int retryNum,
+      String streamID) {
+    this(clientCorrelationId, fileSystemId, operation, needsPrimaryRequestId,
+        retryNum);
+    this.streamID = streamID;
+  }
+
+  private void validateTracingHeader(String tracingContextHeader) {
+    String[] idList = tracingContextHeader.split(":");
+    validateBasicFormat(idList);
+    if (format != TracingHeaderFormat.ALL_ID_FORMAT) {
+      return;
+    }
+    if (!primaryRequestId.isEmpty() && !idList[3].isEmpty()) {
+      Assertions.assertThat(idList[3])
+          .describedAs("PrimaryReqID should be common for these requests")
+          .isEqualTo(primaryRequestId);
+    }
+    if (!streamID.isEmpty()) {
+      Assertions.assertThat(idList[4])
+          .describedAs("Stream id should be common for these requests")
+          .isEqualTo(streamID);
+    }
+  }
+
+  private void validateBasicFormat(String[] idList) {
+    if (format == TracingHeaderFormat.ALL_ID_FORMAT) {
+      Assertions.assertThat(idList)
+          .describedAs("header should have 7 elements").hasSize(7);
+    } else if (format == TracingHeaderFormat.TWO_ID_FORMAT) {
+      Assertions.assertThat(idList)
+          .describedAs("header should have 2 elements").hasSize(2);
+    } else {
+      Assertions.assertThat(idList).describedAs("header should have 1 element")
+          .hasSize(1);
+      Assertions.assertThat(idList[0])
+          .describedAs("Client request ID is a guid").matches(GUID_PATTERN);
+      return;
+    }
+
+    if (clientCorrelationId.matches("[a-zA-Z0-9-]*")) {
+      Assertions.assertThat(idList[0])
+          .describedAs("Correlation ID should match config")
+          .isEqualTo(clientCorrelationId);
+    } else {
+      Assertions.assertThat(idList[0])
+          .describedAs("Invalid config should be replaced with empty string")
+          .isEmpty();
+    }
+    Assertions.assertThat(idList[1]).describedAs("Client request ID is a guid")
+        .matches(GUID_PATTERN);
+
+    if (format != TracingHeaderFormat.ALL_ID_FORMAT) {
+      return;
+    }
+
+    Assertions.assertThat(idList[2]).describedAs("Filesystem ID incorrect")
+        .isEqualTo(fileSystemId);
+    if (needsPrimaryRequestId && !operation
+        .equals(FSOperationType.READ)) {
+      Assertions.assertThat(idList[3]).describedAs("should have primaryReqId")
+          .isNotEmpty();
+    }
+    Assertions.assertThat(idList[5]).describedAs("Operation name incorrect")
+        .isEqualTo(operation.toString());
+    int retryCount = Integer.parseInt(idList[6]);
+    Assertions.assertThat(retryCount)
+        .describedAs("Retry was required due to issue on server side")
+        .isEqualTo(retryNum);
+  }
+
+  /**
+   * Sets the value of expected Hadoop operation
+   * @param operation Hadoop operation code (String of two characters)
+   */
+  @Override
+  public void setOperation(FSOperationType operation) {
+    this.operation = operation;
+  }
+
+  @Override
+  public void updatePrimaryRequestID(String primaryRequestId) {
+    this.primaryRequestId = primaryRequestId;
+  }
+}
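Reading validateBasicFormat back out: in ALL_ID_FORMAT the header carries seven colon-separated fields — client correlation ID, client request GUID, filesystem ID, primary request ID, stream ID, the two-character operation code, and the retry count. A minimal sketch of driving the new validator directly with a hand-assembled header; the literal IDs here are invented, and in the real tests the header is produced by TracingContext rather than built by hand:

```java
import java.util.UUID;

import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderFormat;
import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;

public class TracingHeaderValidatorSketch {
  public static void main(String[] args) {
    FSOperationType op = FSOperationType.WRITE;
    // Seven fields: correlation ID, client request GUID, filesystem ID,
    // primary request ID, stream ID, operation code, retry count.
    // Primary request ID and stream ID are left empty here.
    String header = String.join(":",
        "valid-corr-id", UUID.randomUUID().toString(), "test-fs-id",
        "", "", op.toString(), "0");

    TracingHeaderValidator validator = new TracingHeaderValidator(
        "valid-corr-id", "test-fs-id", op,
        false /* needsPrimaryRequestId */, 0 /* retryNum */);
    // Throws an AssertionError if any field disagrees with expectations.
    validator.callTracingHeaderValidator(header,
        TracingHeaderFormat.ALL_ID_FORMAT);
  }
}
```

Because needsPrimaryRequestId is false and no stream ID expectation was set, the empty fourth and fifth fields pass validation; only non-empty expectations are compared.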