From c4dd43d014387939d1ae9cfdf17d5f85877a5b29 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Sun, 29 Sep 2019 23:40:47 -0700 Subject: [PATCH 01/13] track ADLS end-to-end latency --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 9 +- .../fs/azurebfs/AzureBlobFileSystemStore.java | 1069 +++++++++++------ .../azurebfs/constants/ConfigurationKeys.java | 2 + .../constants/FileSystemConfigurations.java | 1 + .../fs/azurebfs/services/AbfsClient.java | 5 +- .../azurebfs/services/AbfsHttpOperation.java | 40 + .../fs/azurebfs/services/AbfsInputStream.java | 8 + .../azurebfs/services/AbfsOutputStream.java | 26 +- .../azurebfs/services/AbfsRestOperation.java | 8 + .../fs/azurebfs/services/LatencyTracker.java | 203 ++++ .../fs/azurebfs/services/TestAbfsClient.java | 2 +- .../azurebfs/services/TestLatencyTracker.java | 473 ++++++++ .../src/test/resources/azure-test.xml | 5 + 13 files changed, 1481 insertions(+), 370 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index ef2d83f547746..bec774977a85e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -178,6 +178,10 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_USE_UPN) private boolean useUpn; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ABFS_LATENCY_TRACK, + DefaultValue = DEFAULT_ABFS_LATENCY_TRACK) + private boolean trackLatency; + private Map storageAccountKeys; public AbfsConfiguration(final Configuration rawConfig, String accountName) @@ -471,6 +475,10 @@ public boolean isUpnUsed() { return this.useUpn; } + public boolean shouldTrackLatency() { + return this.trackLatency; + } + public AccessTokenProvider getTokenProvider() throws TokenAccessProviderException { AuthType authType = getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey); if (authType == AuthType.OAuth) { @@ -679,5 +687,4 @@ private String appendSlashIfNeeded(String authority) { } return authority; } - } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 7f1bf103c2d52..e1143419cce53 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -35,6 +35,7 @@ import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -81,6 +82,7 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; +import org.apache.hadoop.fs.azurebfs.services.LatencyTracker; import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials; import org.apache.hadoop.fs.azurebfs.utils.Base64; 
import org.apache.hadoop.fs.azurebfs.utils.CRC64; @@ -130,6 +132,7 @@ public class AzureBlobFileSystemStore implements Closeable { private final AuthType authType; private final UserGroupInformation userGroupInformation; private final IdentityTransformer identityTransformer; + private final LatencyTracker latencyTracker; public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration) throws IOException { @@ -162,6 +165,7 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration c this.authType = abfsConfiguration.getAuthType(accountName); boolean usingOauth = (authType == AuthType.OAuth); boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? true : isSecureScheme; + this.latencyTracker = new LatencyTracker(fileSystemName, accountName, this.abfsConfiguration); initializeClient(uri, fileSystemName, accountName, useHttps); this.identityTransformer = new IdentityTransformer(abfsConfiguration.getRawConfiguration()); } @@ -211,10 +215,16 @@ private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, In public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { if (!isNamespaceEnabledSet) { + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + LOG.debug("Get root ACL status"); try { - client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); + AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); + res = op.getResult(); isNamespaceEnabled = true; + success = true; } catch (AbfsRestOperationException ex) { // Get ACL status is a HEAD request, its response doesn't contain errorCode // So can only rely on its status code to determine its account type. 
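
The hunks in this file all apply the same instrumentation shape: take a start Instant from the LatencyTracker, run the client call, flip a success flag, and record the latency in a finally block so that failed calls are reported as well. A minimal stand-alone sketch of that shape follows; the StubTracker and StubClient types and the println sink are illustrative stand-ins only, while the real LatencyTracker, its getLatencyInstant()/recordClientLatency(...) methods, and the AbfsHttpOperation result argument are the ones introduced by this patch.

import java.time.Duration;
import java.time.Instant;

// Illustrative sketch of the try/finally latency-tracking pattern used throughout
// AzureBlobFileSystemStore in this patch. StubTracker and StubClient are placeholders,
// not classes from the patch.
public class LatencyPatternSketch {

  static class StubTracker {
    // Mirrors LatencyTracker.getLatencyInstant(); the real tracker returns null when tracking is off.
    Instant getLatencyInstant() {
      return Instant.now();
    }

    // Mirrors the shape of recordClientLatency(start, caller, callee, success, result);
    // the result argument is omitted here for brevity.
    void recordClientLatency(Instant start, String caller, String callee, boolean success) {
      if (start == null) {
        return; // tracking disabled
      }
      long elapsedMs = Duration.between(start, Instant.now()).toMillis();
      System.out.println(caller + "/" + callee + " success=" + success + " latencyMs=" + elapsedMs);
    }
  }

  static class StubClient {
    void getAclStatus(String path) {
      // Placeholder for the real REST call.
    }
  }

  private final StubTracker latencyTracker = new StubTracker();
  private final StubClient client = new StubClient();

  // Same control flow as the instrumented methods: success stays false unless the call
  // completes, and the latency record is emitted even when an exception propagates.
  public boolean getIsNamespaceEnabled() {
    final Instant start = latencyTracker.getLatencyInstant();
    boolean success = false;
    try {
      client.getAclStatus("/");
      success = true;
      return true;
    } finally {
      latencyTracker.recordClientLatency(start, "getIsNamespaceEnabled", "getAclStatus", success);
    }
  }

  public static void main(String[] args) {
    new LatencyPatternSketch().getIsNamespaceEnabled();
  }
}
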
@@ -222,6 +232,8 @@ public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { throw ex; } isNamespaceEnabled = false; + } finally { + latencyTracker.recordClientLatency(start, "getIsNamespaceEnabled", "getAclStatus", success, res); } isNamespaceEnabledSet = true; } @@ -265,182 +277,295 @@ public AbfsConfiguration getAbfsConfiguration() { } public Hashtable getFilesystemProperties() throws AzureBlobFileSystemException { - LOG.debug("getFilesystemProperties for filesystem: {}", - client.getFileSystem()); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + + try { + LOG.debug("getFilesystemProperties for filesystem: {}", + client.getFileSystem()); + + final Hashtable parsedXmsProperties; - final Hashtable parsedXmsProperties; + final AbfsRestOperation op = client.getFilesystemProperties(); + res = op.getResult(); - final AbfsRestOperation op = client.getFilesystemProperties(); - final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); + final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); - parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); + parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); + success = true; - return parsedXmsProperties; + return parsedXmsProperties; + } finally { + latencyTracker.recordClientLatency(start, "getFilesystemProperties", "getFilesystemProperties", success, res); + } } public void setFilesystemProperties(final Hashtable properties) throws AzureBlobFileSystemException { - if (properties == null || properties.isEmpty()) { - return; - } - - LOG.debug("setFilesystemProperties for filesystem: {} with properties: {}", - client.getFileSystem(), - properties); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - final String commaSeparatedProperties; try { - commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); - } catch (CharacterCodingException ex) { - throw new InvalidAbfsRestOperationException(ex); - } + if (properties == null || properties.isEmpty()) { + return; + } + + LOG.debug("setFilesystemProperties for filesystem: {} with properties: {}", + client.getFileSystem(), + properties); - client.setFilesystemProperties(commaSeparatedProperties); + final String commaSeparatedProperties; + try { + commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); + } catch (CharacterCodingException ex) { + throw new InvalidAbfsRestOperationException(ex); + } + + final AbfsRestOperation op = client.setFilesystemProperties(commaSeparatedProperties); + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "setFilesystemProperties", "setFilesystemProperties", success, res); + } } public Hashtable getPathStatus(final Path path) throws AzureBlobFileSystemException { - LOG.debug("getPathStatus for filesystem: {} path: {}", - client.getFileSystem(), - path); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - final Hashtable parsedXmsProperties; - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + try { + LOG.debug("getPathStatus for filesystem: {} path: {}", + client.getFileSystem(), + path); + + final Hashtable parsedXmsProperties; + final AbfsRestOperation op = 
client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + res = op.getResult(); + + final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); - final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); + parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); - parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); + success = true; - return parsedXmsProperties; + return parsedXmsProperties; + } finally { + latencyTracker.recordClientLatency(start, "getPathStatus", "getPathStatus", success, res); + } } public void setPathProperties(final Path path, final Hashtable properties) throws AzureBlobFileSystemException { - LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}", - client.getFileSystem(), - path, - properties); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - final String commaSeparatedProperties; try { - commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); - } catch (CharacterCodingException ex) { - throw new InvalidAbfsRestOperationException(ex); + LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}", + client.getFileSystem(), + path, + properties); + + final String commaSeparatedProperties; + try { + commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); + } catch (CharacterCodingException ex) { + throw new InvalidAbfsRestOperationException(ex); + } + final AbfsRestOperation op = client.setPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), commaSeparatedProperties); + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "setPathProperties", "setPathProperties", success, res); } - client.setPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), commaSeparatedProperties); } public void createFilesystem() throws AzureBlobFileSystemException { - LOG.debug("createFilesystem for filesystem: {}", - client.getFileSystem()); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - client.createFilesystem(); + try { + LOG.debug("createFilesystem for filesystem: {}", + client.getFileSystem()); + + final AbfsRestOperation op = client.createFilesystem(); + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "createFilesystem", "createFilesystem", success, res); + } } public void deleteFilesystem() throws AzureBlobFileSystemException { - LOG.debug("deleteFilesystem for filesystem: {}", - client.getFileSystem()); + Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + + try { + LOG.debug("deleteFilesystem for filesystem: {}", + client.getFileSystem()); + + final AbfsRestOperation op = client.deleteFilesystem(); + res = op.getResult(); - client.deleteFilesystem(); + success = true; + } finally { + latencyTracker.recordClientLatency(start, "deleteFilesystem", "deleteFilesystem", success, res); + } } public OutputStream createFile(final Path path, final boolean overwrite, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); - LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} 
isNamespaceEnabled: {}", - client.getFileSystem(), - path, - overwrite, - permission.toString(), - umask.toString(), - isNamespaceEnabled); - - client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite, - isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); - - return new AbfsOutputStream( - client, - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), - 0, - abfsConfiguration.getWriteBufferSize(), - abfsConfiguration.isFlushEnabled(), - abfsConfiguration.isOutputStreamFlushDisabled()); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + + try { + boolean isNamespaceEnabled = getIsNamespaceEnabled(); + LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} isNamespaceEnabled: {}", + client.getFileSystem(), + path, + overwrite, + permission.toString(), + umask.toString(), + isNamespaceEnabled); + + final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite, + isNamespaceEnabled ? getOctalNotation(permission) : null, + isNamespaceEnabled ? getOctalNotation(umask) : null); + res = op.getResult(); + + success = true; + + return new AbfsOutputStream( + client, + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), + 0, + abfsConfiguration.getWriteBufferSize(), + abfsConfiguration.isFlushEnabled(), + abfsConfiguration.isOutputStreamFlushDisabled()); + } finally { + latencyTracker.recordClientLatency(start, "createFile", "createPath", success, res); + } } public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); - LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}", - client.getFileSystem(), - path, - permission, - umask, - isNamespaceEnabled); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), false, true, - isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); + try { + boolean isNamespaceEnabled = getIsNamespaceEnabled(); + LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}", + client.getFileSystem(), + path, + permission, + umask, + isNamespaceEnabled); + + final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), false, true, + isNamespaceEnabled ? getOctalNotation(permission) : null, + isNamespaceEnabled ? 
getOctalNotation(umask) : null); + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "createDirectory", "createPath", success, res); + } } public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statistics statistics) throws AzureBlobFileSystemException { - LOG.debug("openFileForRead filesystem: {} path: {}", - client.getFileSystem(), - path); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + try { + LOG.debug("openFileForRead filesystem: {} path: {}", + client.getFileSystem(), + path); - final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); - final long contentLength = Long.parseLong(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + res = op.getResult(); - if (parseIsDirectory(resourceType)) { - throw new AbfsRestOperationException( - AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), - AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), - "openFileForRead must be used with files and not directories", - null); - } + final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); + final long contentLength = Long.parseLong(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + + if (parseIsDirectory(resourceType)) { + throw new AbfsRestOperationException( + AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), + AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), + "openFileForRead must be used with files and not directories", + null); + } + + success = true; - // Add statistics for InputStream - return new AbfsInputStream(client, statistics, - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength, - abfsConfiguration.getReadBufferSize(), abfsConfiguration.getReadAheadQueueDepth(), - abfsConfiguration.getTolerateOobAppends(), eTag); + // Add statistics for InputStream + return new AbfsInputStream(client, statistics, + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength, + abfsConfiguration.getReadBufferSize(), abfsConfiguration.getReadAheadQueueDepth(), + abfsConfiguration.getTolerateOobAppends(), eTag); + } finally { + latencyTracker.recordClientLatency(start, "openFileForRead", "getPathStatus", success, res); + } } public OutputStream openFileForWrite(final Path path, final boolean overwrite) throws AzureBlobFileSystemException { - LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", - client.getFileSystem(), - path, - overwrite); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + try { + LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", + client.getFileSystem(), + path, + overwrite); - final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); - final Long contentLength = 
Long.valueOf(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); + final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + res = op.getResult(); - if (parseIsDirectory(resourceType)) { - throw new AbfsRestOperationException( - AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), - AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), - "openFileForRead must be used with files and not directories", - null); - } + final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); + final Long contentLength = Long.valueOf(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); + + if (parseIsDirectory(resourceType)) { + throw new AbfsRestOperationException( + AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), + AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), + "openFileForRead must be used with files and not directories", + null); + } + + final long offset = overwrite ? 0 : contentLength; - final long offset = overwrite ? 0 : contentLength; + success = true; - return new AbfsOutputStream( - client, - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), - offset, - abfsConfiguration.getWriteBufferSize(), - abfsConfiguration.isFlushEnabled(), - abfsConfiguration.isOutputStreamFlushDisabled()); + return new AbfsOutputStream( + client, + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), + offset, + abfsConfiguration.getWriteBufferSize(), + abfsConfiguration.isFlushEnabled(), + abfsConfiguration.isOutputStreamFlushDisabled()); + } finally { + latencyTracker.recordClientLatency(start, "openFileForWrite", "getPathStatus", success, res); + } } public void rename(final Path source, final Path destination) throws AzureBlobFileSystemException { + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + boolean shouldContinue = true; if (isAtomicRenameKey(source.getName())) { LOG.warn("The atomic rename feature is not supported by the ABFS scheme; however rename," @@ -455,15 +580,34 @@ public void rename(final Path source, final Path destination) throws String continuation = null; do { - AbfsRestOperation op = client.renamePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(source), - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(destination), continuation); - continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); + Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - } while (continuation != null && !continuation.isEmpty()); + try { + AbfsRestOperation op = client.renamePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(source), + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(destination), continuation); + res = op.getResult(); + continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); + success = true; + countAggregate++; + shouldContinue = continuation != null && !continuation.isEmpty(); + } finally { + if(shouldContinue) { + latencyTracker.recordClientLatency(start, "rename", "renamePath", success, res); + } else { + latencyTracker.recordClientLatency(start, "rename", "renamePath", success, startAggregate, countAggregate, res); + } + } + } while (shouldContinue); } public void delete(final Path path, final boolean recursive) throws AzureBlobFileSystemException { + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + 
boolean shouldContinue = true; + LOG.debug("delete filesystem: {} path: {} recursive: {}", client.getFileSystem(), path, @@ -472,70 +616,102 @@ public void delete(final Path path, final boolean recursive) String continuation = null; do { - AbfsRestOperation op = client.deletePath( - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation); - continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); + Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - } while (continuation != null && !continuation.isEmpty()); + try { + AbfsRestOperation op = client.deletePath( + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation); + res = op.getResult(); + continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); + success = true; + countAggregate++; + shouldContinue = continuation != null && !continuation.isEmpty(); + } finally { + if (shouldContinue) { + latencyTracker.recordClientLatency(start, "delete", "deletePath", success, res); + } else { + latencyTracker.recordClientLatency(start, "delete", "deletePath", success, startAggregate, countAggregate, res); + } + } + } while (shouldContinue); } public FileStatus getFileStatus(final Path path) throws IOException { - boolean isNamespaceEnabled = getIsNamespaceEnabled(); - LOG.debug("getFileStatus filesystem: {} path: {} isNamespaceEnabled: {}", - client.getFileSystem(), - path, - isNamespaceEnabled); + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + String calleeName = null; - final AbfsRestOperation op; - if (path.isRoot()) { - op = isNamespaceEnabled - ? client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH) - : client.getFilesystemProperties(); - } else { - op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - } - - final long blockSize = abfsConfiguration.getAzureBlockSize(); - final AbfsHttpOperation result = op.getResult(); - - final String eTag = result.getResponseHeader(HttpHeaderConfigurations.ETAG); - final String lastModified = result.getResponseHeader(HttpHeaderConfigurations.LAST_MODIFIED); - final String permissions = result.getResponseHeader((HttpHeaderConfigurations.X_MS_PERMISSIONS)); - final boolean hasAcl = AbfsPermission.isExtendedAcl(permissions); - final long contentLength; - final boolean resourceIsDir; + try { + boolean isNamespaceEnabled = getIsNamespaceEnabled(); + LOG.debug("getFileStatus filesystem: {} path: {} isNamespaceEnabled: {}", + client.getFileSystem(), + path, + isNamespaceEnabled); + + final AbfsRestOperation op; + if (path.isRoot()) { + if (isNamespaceEnabled) { + calleeName = "getAclStatus"; + op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); + } else { + calleeName = "getFilesystemProperties"; + op = client.getFilesystemProperties(); + } + } else { + calleeName = "getPathStatus"; + op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + } - if (path.isRoot()) { - contentLength = 0; - resourceIsDir = true; - } else { - contentLength = parseContentLength(result.getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); - resourceIsDir = parseIsDirectory(result.getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE)); - } + res = op.getResult(); + final long blockSize = abfsConfiguration.getAzureBlockSize(); + final 
AbfsHttpOperation result = op.getResult(); + + final String eTag = result.getResponseHeader(HttpHeaderConfigurations.ETAG); + final String lastModified = result.getResponseHeader(HttpHeaderConfigurations.LAST_MODIFIED); + final String permissions = result.getResponseHeader((HttpHeaderConfigurations.X_MS_PERMISSIONS)); + final boolean hasAcl = AbfsPermission.isExtendedAcl(permissions); + final long contentLength; + final boolean resourceIsDir; + + if (path.isRoot()) { + contentLength = 0; + resourceIsDir = true; + } else { + contentLength = parseContentLength(result.getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); + resourceIsDir = parseIsDirectory(result.getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE)); + } - final String transformedOwner = identityTransformer.transformIdentityForGetRequest( + final String transformedOwner = identityTransformer.transformIdentityForGetRequest( result.getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), true, userName); - final String transformedGroup = identityTransformer.transformIdentityForGetRequest( + final String transformedGroup = identityTransformer.transformIdentityForGetRequest( result.getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), false, primaryUserGroup); - return new VersionedFileStatus( - transformedOwner, - transformedGroup, - permissions == null ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) - : AbfsPermission.valueOf(permissions), - hasAcl, - contentLength, - resourceIsDir, - 1, - blockSize, - parseLastModifiedTime(lastModified), - path, - eTag); + success = true; + + return new VersionedFileStatus( + transformedOwner, + transformedGroup, + permissions == null ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) + : AbfsPermission.valueOf(permissions), + hasAcl, + contentLength, + resourceIsDir, + 1, + blockSize, + parseLastModifiedTime(lastModified), + path, + eTag); + } finally { + latencyTracker.recordClientLatency(start, "getFileStatus", calleeName, success, res); + } } /** @@ -559,6 +735,10 @@ public FileStatus[] listStatus(final Path path) throws IOException { * */ @InterfaceStability.Unstable public FileStatus[] listStatus(final Path path, final String startFrom) throws IOException { + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + boolean shouldContinue = true; + LOG.debug("listStatus filesystem: {} path: {}, startFrom: {}", client.getFileSystem(), path, @@ -576,53 +756,69 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I ArrayList fileStatuses = new ArrayList<>(); do { - AbfsRestOperation op = client.listPath(relativePath, false, LIST_MAX_RESULTS, continuation); - continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); - ListResultSchema retrievedSchema = op.getResult().getListResultSchema(); - if (retrievedSchema == null) { - throw new AbfsRestOperationException( - AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), - AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), - "listStatusAsync path not found", - null, op.getResult()); - } + Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - long blockSize = abfsConfiguration.getAzureBlockSize(); - - for (ListResultEntrySchema entry : retrievedSchema.paths()) { - final String owner = identityTransformer.transformIdentityForGetRequest(entry.owner(), true, userName); - final String group = 
identityTransformer.transformIdentityForGetRequest(entry.group(), false, primaryUserGroup); - final FsPermission fsPermission = entry.permissions() == null - ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) - : AbfsPermission.valueOf(entry.permissions()); - final boolean hasAcl = AbfsPermission.isExtendedAcl(entry.permissions()); - - long lastModifiedMillis = 0; - long contentLength = entry.contentLength() == null ? 0 : entry.contentLength(); - boolean isDirectory = entry.isDirectory() == null ? false : entry.isDirectory(); - if (entry.lastModified() != null && !entry.lastModified().isEmpty()) { - lastModifiedMillis = parseLastModifiedTime(entry.lastModified()); + try { + AbfsRestOperation op = client.listPath(relativePath, false, LIST_MAX_RESULTS, continuation); + res = op.getResult(); + continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); + ListResultSchema retrievedSchema = op.getResult().getListResultSchema(); + if (retrievedSchema == null) { + throw new AbfsRestOperationException( + AzureServiceErrorCode.PATH_NOT_FOUND.getStatusCode(), + AzureServiceErrorCode.PATH_NOT_FOUND.getErrorCode(), + "listStatusAsync path not found", + null, op.getResult()); } - Path entryPath = new Path(File.separator + entry.name()); - entryPath = entryPath.makeQualified(this.uri, entryPath); - - fileStatuses.add( - new VersionedFileStatus( - owner, - group, - fsPermission, - hasAcl, - contentLength, - isDirectory, - 1, - blockSize, - lastModifiedMillis, - entryPath, - entry.eTag())); - } + long blockSize = abfsConfiguration.getAzureBlockSize(); + + for (ListResultEntrySchema entry : retrievedSchema.paths()) { + final String owner = identityTransformer.transformIdentityForGetRequest(entry.owner(), true, userName); + final String group = identityTransformer.transformIdentityForGetRequest(entry.group(), false, primaryUserGroup); + final FsPermission fsPermission = entry.permissions() == null + ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) + : AbfsPermission.valueOf(entry.permissions()); + final boolean hasAcl = AbfsPermission.isExtendedAcl(entry.permissions()); + + long lastModifiedMillis = 0; + long contentLength = entry.contentLength() == null ? 0 : entry.contentLength(); + boolean isDirectory = entry.isDirectory() == null ? 
false : entry.isDirectory(); + if (entry.lastModified() != null && !entry.lastModified().isEmpty()) { + lastModifiedMillis = parseLastModifiedTime(entry.lastModified()); + } - } while (continuation != null && !continuation.isEmpty()); + Path entryPath = new Path(File.separator + entry.name()); + entryPath = entryPath.makeQualified(this.uri, entryPath); + + fileStatuses.add( + new VersionedFileStatus( + owner, + group, + fsPermission, + hasAcl, + contentLength, + isDirectory, + 1, + blockSize, + lastModifiedMillis, + entryPath, + entry.eTag())); + } + + success = true; + countAggregate++; + shouldContinue = continuation != null && !continuation.isEmpty(); + } finally { + if (shouldContinue) { + latencyTracker.recordClientLatency(start, "listStatus", "listPath", success, res); + } else { + latencyTracker.recordClientLatency(start, "listStatus", "listPath", success, startAggregate, countAggregate, res); + } + } + } while (shouldContinue); return fileStatuses.toArray(new FileStatus[fileStatuses.size()]); } @@ -684,212 +880,361 @@ private String generateContinuationTokenForNonXns(final String path, final Strin public void setOwner(final Path path, final String owner, final String group) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - LOG.debug( - "setOwner filesystem: {} path: {} owner: {} group: {}", - client.getFileSystem(), - path.toString(), - owner, - group); + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "setOwner filesystem: {} path: {} owner: {} group: {}", + client.getFileSystem(), + path.toString(), + owner, + group); - final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); - final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); + final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); + final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); - client.setOwner(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), transformedOwner, transformedGroup); + final AbfsRestOperation op = client.setOwner(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), transformedOwner, transformedGroup); + + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "setOwner", "setOwner", success, res); + } } public void setPermission(final Path path, final FsPermission permission) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; - LOG.debug( - "setPermission filesystem: {} path: {} permission: {}", - client.getFileSystem(), - path.toString(), - permission.toString()); - client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); + try { + if (!getIsNamespaceEnabled()) { + 
throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "setPermission filesystem: {} path: {} permission: {}", + client.getFileSystem(), + path.toString(), + permission.toString()); + final AbfsRestOperation op = client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); + + res = op.getResult(); + + success = true; + } finally { + latencyTracker.recordClientLatency(start, "setPermission", "setPermission", success, res); + } } public void modifyAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + Instant startSet = null; - LOG.debug( - "modifyAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + boolean successGet = false; + boolean successSet = false; + AbfsHttpOperation resultGet = null; + AbfsHttpOperation resultSet = null; + + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "modifyAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); - identityTransformer.transformAclEntriesForSetRequest(aclSpec); - final Map modifyAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); - boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); + identityTransformer.transformAclEntriesForSetRequest(aclSpec); + final Map modifyAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); + boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), useUpn); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), useUpn); + resultGet = op.getResult(); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); - final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); + final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); - AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries); + AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries); - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + successGet = true; + countAggregate++; + startSet = latencyTracker.getLatencyInstant(); + + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + resultSet = setAclOp.getResult(); + + successSet = true; + countAggregate++; + } finally { + latencyTracker.recordClientLatency(startAggregate, startSet, 
"modifyAclEntries", "getAclStatus", successGet, resultGet); + latencyTracker.recordClientLatency(startSet, "modifyAclEntries", "setAcl", successSet, startAggregate, countAggregate, resultSet); + } } public void removeAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + Instant startSet = null; - LOG.debug( - "removeAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + boolean successGet = false; + boolean successSet = false; + AbfsHttpOperation resultGet = null; + AbfsHttpOperation resultSet = null; - identityTransformer.transformAclEntriesForSetRequest(aclSpec); - final Map removeAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); - boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "removeAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + + identityTransformer.transformAclEntriesForSetRequest(aclSpec); + final Map removeAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); + boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); + resultGet = op.getResult(); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); - final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); + final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); - AbfsAclHelper.removeAclEntriesInternal(aclEntries, removeAclEntries); + AbfsAclHelper.removeAclEntriesInternal(aclEntries, removeAclEntries); - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + successGet = true; + countAggregate++; + startSet = latencyTracker.getLatencyInstant(); + + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + resultSet = setAclOp.getResult(); + + successSet = true; + countAggregate++; + } finally { + latencyTracker.recordClientLatency(startAggregate, startSet, "removeAclEntries", "getAclStatus", successGet, resultGet); + latencyTracker.recordClientLatency(startSet, "removeAclEntries", "setAcl", successSet, startAggregate, countAggregate, resultSet); + } } public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the 
hierarchical namespace enabled."); - } + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + Instant startSet = null; - LOG.debug( - "removeDefaultAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); + boolean successGet = false; + boolean successSet = false; + AbfsHttpOperation resultGet = null; + AbfsHttpOperation resultSet = null; + + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "removeDefaultAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); - final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); - final Map defaultAclEntries = new HashMap<>(); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + resultGet = op.getResult(); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); + final Map defaultAclEntries = new HashMap<>(); - for (Map.Entry aclEntry : aclEntries.entrySet()) { - if (aclEntry.getKey().startsWith("default:")) { - defaultAclEntries.put(aclEntry.getKey(), aclEntry.getValue()); + for (Map.Entry aclEntry : aclEntries.entrySet()) { + if (aclEntry.getKey().startsWith("default:")) { + defaultAclEntries.put(aclEntry.getKey(), aclEntry.getValue()); + } } - } - aclEntries.keySet().removeAll(defaultAclEntries.keySet()); + aclEntries.keySet().removeAll(defaultAclEntries.keySet()); + + successGet = true; + countAggregate++; + startSet = latencyTracker.getLatencyInstant(); - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + resultSet = setAclOp.getResult(); + + successSet = true; + countAggregate++; + } finally { + latencyTracker.recordClientLatency(startAggregate, startSet, "removeDefaultAcl", "getAclStatus", successGet, resultGet); + latencyTracker.recordClientLatency(startSet, "removeDefaultAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + } } public void removeAcl(final Path path) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + Instant startSet = null; - LOG.debug( - "removeAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + boolean successGet = false; + boolean successSet = false; + AbfsHttpOperation resultGet = null; + AbfsHttpOperation resultSet = null; + + try { + if 
(!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "removeAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + resultGet = op.getResult(); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + + final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); + final Map newAclEntries = new HashMap<>(); - final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); - final Map newAclEntries = new HashMap<>(); + newAclEntries.put(AbfsHttpConstants.ACCESS_USER, aclEntries.get(AbfsHttpConstants.ACCESS_USER)); + newAclEntries.put(AbfsHttpConstants.ACCESS_GROUP, aclEntries.get(AbfsHttpConstants.ACCESS_GROUP)); + newAclEntries.put(AbfsHttpConstants.ACCESS_OTHER, aclEntries.get(AbfsHttpConstants.ACCESS_OTHER)); - newAclEntries.put(AbfsHttpConstants.ACCESS_USER, aclEntries.get(AbfsHttpConstants.ACCESS_USER)); - newAclEntries.put(AbfsHttpConstants.ACCESS_GROUP, aclEntries.get(AbfsHttpConstants.ACCESS_GROUP)); - newAclEntries.put(AbfsHttpConstants.ACCESS_OTHER, aclEntries.get(AbfsHttpConstants.ACCESS_OTHER)); + successGet = true; + countAggregate++; + startSet = latencyTracker.getLatencyInstant(); - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); + + resultSet = setAclOp.getResult(); + + successSet = true; + countAggregate++; + } finally { + latencyTracker.recordClientLatency(startAggregate, startSet, "removeAcl", "getAclStatus", successGet, resultGet); + latencyTracker.recordClientLatency(startSet, "removeAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + } } public void setAcl(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant startAggregate = latencyTracker.getLatencyInstant(); + long countAggregate = 0; + Instant startSet = null; - LOG.debug( - "setAcl filesystem: {} path: {} aclspec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + boolean successGet = false; + boolean successSet = false; + AbfsHttpOperation resultGet = null; + AbfsHttpOperation resultSet = null; - identityTransformer.transformAclEntriesForSetRequest(aclSpec); - final Map aclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); - final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "setAcl filesystem: {} path: {} aclspec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + + identityTransformer.transformAclEntriesForSetRequest(aclSpec); + final Map aclEntries = 
AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); + final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); + resultGet = op.getResult(); + final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); - final Map getAclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); + final Map getAclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); - AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries); + AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries); - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + startSet = latencyTracker.getLatencyInstant(); + successGet = true; + countAggregate++; + + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + resultSet = setAclOp.getResult(); + + successSet = true; + countAggregate++; + } finally { + latencyTracker.recordClientLatency(startAggregate, startSet, "setAcl", "getAclStatus", successGet, resultGet); + latencyTracker.recordClientLatency(startSet, "setAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + } } public AclStatus getAclStatus(final Path path) throws IOException { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + final Instant start = latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation result = null; - LOG.debug( - "getAclStatus filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); - AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - AbfsHttpOperation result = op.getResult(); - - final String transformedOwner = identityTransformer.transformIdentityForGetRequest( - result.getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), - true, - userName); - final String transformedGroup = identityTransformer.transformIdentityForGetRequest( - result.getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), - false, - primaryUserGroup); - - final String permissions = result.getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); - final String aclSpecString = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL); - - final List aclEntries = AclEntry.parseAclSpec(AbfsAclHelper.processAclString(aclSpecString), true); - identityTransformer.transformAclEntriesForGetRequest(aclEntries, userName, primaryUserGroup); - final FsPermission fsPermission = permissions == null ? 
new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) - : AbfsPermission.valueOf(permissions); - - final AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); - aclStatusBuilder.owner(transformedOwner); - aclStatusBuilder.group(transformedGroup); - - aclStatusBuilder.setPermission(fsPermission); - aclStatusBuilder.stickyBit(fsPermission.getStickyBit()); - aclStatusBuilder.addEntries(aclEntries); - return aclStatusBuilder.build(); + try { + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } + + LOG.debug( + "getAclStatus filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + result = op.getResult(); + + final String transformedOwner = identityTransformer.transformIdentityForGetRequest( + result.getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), + true, + userName); + final String transformedGroup = identityTransformer.transformIdentityForGetRequest( + result.getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), + false, + primaryUserGroup); + + final String permissions = result.getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); + final String aclSpecString = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL); + + final List aclEntries = AclEntry.parseAclSpec(AbfsAclHelper.processAclString(aclSpecString), true); + identityTransformer.transformAclEntriesForGetRequest(aclEntries, userName, primaryUserGroup); + final FsPermission fsPermission = permissions == null ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL) + : AbfsPermission.valueOf(permissions); + + final AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); + aclStatusBuilder.owner(transformedOwner); + aclStatusBuilder.group(transformedGroup); + + aclStatusBuilder.setPermission(fsPermission); + aclStatusBuilder.stickyBit(fsPermission.getStickyBit()); + aclStatusBuilder.addEntries(aclEntries); + success = true; + return aclStatusBuilder.build(); + } finally { + latencyTracker.recordClientLatency(start, "getAclStatus", "getAclStatus", success, result); + } } public boolean isAtomicRenameKey(String key) { @@ -930,7 +1275,7 @@ private void initializeClient(URI uri, String fileSystemName, String accountName abfsConfiguration.getRawConfiguration()); } - this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(), tokenProvider); + this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(), tokenProvider, latencyTracker); } private String getOctalNotation(FsPermission fsPermission) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index eb4605b1dfdb1..409ffc3c1240c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -114,6 +114,8 @@ public final class ConfigurationKeys { public static final String FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN = "fs.azure.account.oauth2.refresh.token"; /** Key for oauth AAD refresh token endpoint: {@value}. 
*/ public static final String FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN_ENDPOINT = "fs.azure.account.oauth2.refresh.token.endpoint"; + /** Key for enabling the tracking of ABFS API latency and sending the latency numbers to the ABFS API service */ + public static final String FS_AZURE_ABFS_LATENCY_TRACK = "fs.azure.abfs.latency.track"; public static String accountProperty(String property, String account) { return property + "." + account; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index e0c355a07b2d3..3a4385ea3c4fe 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -67,6 +67,7 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_ENABLE_HTTPS = true; public static final boolean DEFAULT_USE_UPN = false; + public static final boolean DEFAULT_ABFS_LATENCY_TRACK = false; private FileSystemConfigurations() {} } \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index edcd1972c23b7..ce107cb4fb307 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -60,6 +60,7 @@ public class AbfsClient implements Closeable { private final String filesystem; private final AbfsConfiguration abfsConfiguration; private final String userAgent; + protected final LatencyTracker latencyTracker; private final AccessTokenProvider tokenProvider; @@ -67,7 +68,8 @@ public class AbfsClient implements Closeable { public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final ExponentialRetryPolicy exponentialRetryPolicy, - final AccessTokenProvider tokenProvider) { + final AccessTokenProvider tokenProvider, + final LatencyTracker latencyTracker) { this.baseUrl = baseUrl; this.sharedKeyCredentials = sharedKeyCredentials; String baseUrlString = baseUrl.toString(); @@ -88,6 +90,7 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent this.userAgent = initializeUserAgent(abfsConfiguration, sslProviderName); this.tokenProvider = tokenProvider; + this.latencyTracker = latencyTracker; } @Override diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 5579877b5f623..94cebf0dc541b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -21,8 +21,10 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.UnsupportedEncodingException; import java.net.HttpURLConnection; import java.net.URL; +import java.net.URLEncoder; import java.util.List; import java.util.UUID; @@ -161,6 +163,44 @@ public String toString() { return 
sb.toString(); } + // Returns a trace message for the ABFS API logging service to consume + public String toKvpString() { + String urlStr = null; + + try{ + urlStr = URLEncoder.encode(url.toString(), "UTF-8"); + } catch(UnsupportedEncodingException e) { + urlStr = "https%3A%2F%2Ffailed%2Fto%2Fencode%2Furl"; + } + + final StringBuilder sb = new StringBuilder(); + sb.append("s="); + sb.append(statusCode); + sb.append(" e="); + sb.append(storageErrorCode); + sb.append(" ci="); + sb.append(clientRequestId); + sb.append(" ri="); + sb.append(requestId); + if (isTraceEnabled) { + sb.append(" ct="); + sb.append(connectionTimeMs); + sb.append(" st="); + sb.append(sendRequestTimeMs); + sb.append(" rt="); + sb.append(recvResponseTimeMs); + } + sb.append(" bs="); + sb.append(bytesSent); + sb.append(" br="); + sb.append(bytesReceived); + sb.append(" m="); + sb.append(method); + sb.append(" u="); + sb.append(urlStr); + return sb.toString(); + } + /** * Initializes a new HTTP request and opens the connection. * diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index fe48cb9323712..ee003a2be7cf9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -22,6 +22,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.HttpURLConnection; +import java.time.Instant; import com.google.common.base.Preconditions; @@ -226,8 +227,13 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); } final AbfsRestOperation op; + final Instant start = client.latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; try { op = client.read(path, position, b, offset, length, tolerateOobAppends ? 
"*" : eTag); + res = op.getResult(); + success = true; } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { AbfsRestOperationException ere = (AbfsRestOperationException) ex; @@ -236,6 +242,8 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti } } throw new IOException(ex); + } finally { + client.latencyTracker.recordClientLatency(start, "readRemote", "read", success, res); } long bytesRead = op.getResult().getBytesReceived(); if (bytesRead > Integer.MAX_VALUE) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index fd56eb0a015b9..c2703d78ab274 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -23,6 +23,7 @@ import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.HttpURLConnection; +import java.time.Instant; import java.nio.ByteBuffer; import java.util.Locale; import java.util.concurrent.ConcurrentLinkedDeque; @@ -289,10 +290,18 @@ private synchronized void writeCurrentBufferToService() throws IOException { final Future job = completionService.submit(new Callable() { @Override public Void call() throws Exception { - client.append(path, offset, bytes, 0, - bytesLength); - byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); - return null; + final Instant start = client.latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + try { + res = client.append(path, offset, bytes, 0, + bytesLength).getResult(); + byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); + success = true; + return null; + } finally { + client.latencyTracker.recordClientLatency(start, "writeCurrentBufferToService", "append", success, res); + } } }); @@ -334,8 +343,13 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { + final Instant start = client.latencyTracker.getLatencyInstant(); + boolean success = false; + AbfsHttpOperation res = null; + try { - client.flush(path, offset, retainUncommitedData, isClose); + res = client.flush(path, offset, retainUncommitedData, isClose).getResult(); + success = true; } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { if (((AbfsRestOperationException) ex).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { @@ -343,6 +357,8 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset, } } throw new IOException(ex); + } finally { + client.latencyTracker.recordClientLatency(start, "flushWrittenBytesToServiceInternal", "flush", success, res); } this.lastFlushOffset = offset; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 4196c10ed4728..cfecaac410e2d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -121,7 +121,15 @@ public 
AbfsHttpOperation getResult() { * HTTP operations. */ void execute() throws AzureBlobFileSystemException { + // see if we have latency reports from the previous requests + String latencyHeader = this.client.latencyTracker.getClientLatency(); + + if(latencyHeader != null && !latencyHeader.isEmpty()) { + requestHeaders.add(new AbfsHttpHeader("x-ms-abfs-client-latency", latencyHeader)); + } + int retryCount = 0; + while (!executeHttpOperation(retryCount++)) { try { Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount)); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java new file mode 100644 index 0000000000000..0d2978abfe865 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentLinkedQueue; + + +/** + * {@code LatencyTracker} keeps track of service latencies observed by {@code AbfsClient}. Every request adds + * its information (success/failure, latency etc) to the {@code LatencyTracker}'s queue. + * When a request is made, we check {@code LatencyTracker} to see if there are any latency numbers to be reported. + * If there are any, the stats are added to an HTTP header ({@code x-ms-abfs-client-latency}) on the next request. 
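+ *
+ * An illustrative call pattern (an editor's sketch assembled from the callers in this patch, not an
+ * exact excerpt; {@code someAbfsCall} stands in for any instrumented call such as read, append or flush):
+ * <pre>
+ *   // tracking is off unless fs.azure.abfs.latency.track is set to true (default: false)
+ *   LatencyTracker tracker = new LatencyTracker(fileSystemName, accountName, abfsConfiguration);
+ *
+ *   final Instant start = tracker.getLatencyInstant();  // null when tracking is disabled
+ *   boolean success = false;
+ *   AbfsHttpOperation res = null;
+ *   try {
+ *     res = someAbfsCall().getResult();
+ *     success = true;
+ *   } finally {
+ *     tracker.recordClientLatency(start, "callerName", "calleeName", success, res);
+ *   }
+ *
+ *   // on the next request, AbfsRestOperation drains one queued record and, when non-empty,
+ *   // sends it as the x-ms-abfs-client-latency request header
+ *   String latencyHeader = tracker.getClientLatency();
+ * </pre>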
* + */ +public class LatencyTracker { + + // the logger + private static final Logger LOG = LoggerFactory.getLogger(LatencyTracker.class); + + // the queue to hold latency information + private final ConcurrentLinkedQueue Q = new ConcurrentLinkedQueue(); + + // whether the latency tracker has been enabled + private boolean enabled = false; + + // the host name + private String hostName; + + // the file system name + private String filesystemName; + + // the account name + private String accountName; + + // singleton latency reporting format + private String singletonLatencyReportingFormat; + + // aggregate latency reporting format + private String aggregateLatencyReportingFormat; + + public LatencyTracker(String filesystemName, String accountName, AbfsConfiguration configuration) { + this(filesystemName, accountName, configuration.shouldTrackLatency()); + } + + protected LatencyTracker(String filesystemName, String accountName, boolean enabled) { + this.enabled = enabled; + this.filesystemName = filesystemName; + this.accountName = accountName; + + LOG.debug("LatencyTracker configuration: {}", enabled); + + if(enabled) { + try { + hostName = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + hostName = "UnknownHost"; + } + + singletonLatencyReportingFormat = "h=" + hostName + " t=%s a=" + accountName + " c=" + filesystemName + " cr=%s ce=%s r=%s l=%s%s"; + aggregateLatencyReportingFormat = "h=" + hostName + " t=%s a=" + accountName + " c=" + filesystemName + " cr=%s ce=%s r=%s l=%s ls=%s lc=%s%s"; + } + } + + public void recordClientLatency( + Instant operationStart, + String callerName, + String calleeName, + boolean success, + AbfsHttpOperation res) { + if(!enabled) return; + + Instant operationStop = getLatencyInstant(); + + recordClientLatency(operationStart, operationStop, callerName, calleeName, success, res); + } + + public void recordClientLatency( + Instant operationStart, + Instant operationStop, + String callerName, + String calleeName, + boolean success, + AbfsHttpOperation res) { + if(!enabled) return; + + Instant trackerStart = Instant.now(); + long latency = isValidInstant(operationStart) && isValidInstant(operationStop) ? + Duration.between(operationStart, operationStop).toMillis() : -1; + + String latencyDetails = String.format(singletonLatencyReportingFormat, + Instant.now(), + callerName, + calleeName, + success ? "Succeeded" : "Failed", + latency, + res == null ? "" : (" " + res.toKvpString())); + + this.offerToQueue(trackerStart, latencyDetails); + } + + public void recordClientLatency( + Instant operationStart, + String callerName, + String calleeName, + boolean success, + Instant aggregateStart, + long aggregateCount, + AbfsHttpOperation res) { + if(!enabled) return; + + Instant operationStop = getLatencyInstant(); + + recordClientLatency(operationStart, operationStop, callerName, calleeName, success, aggregateStart, aggregateCount, res); + } + + public void recordClientLatency( + Instant operationStart, + Instant operationStop, + String callerName, + String calleeName, + boolean success, + Instant aggregateStart, + long aggregateCount, + AbfsHttpOperation res){ + if(!enabled) return; + + Instant trackerStart = Instant.now(); + long latency = isValidInstant(operationStart) && isValidInstant(operationStop) ? + Duration.between(operationStart, operationStop).toMillis() : -1; + long aggregateLatency = isValidInstant(aggregateStart) && isValidInstant(operationStop) ? 
+ Duration.between(aggregateStart, operationStop).toMillis() : -1; + + String latencyDetails = String.format(aggregateLatencyReportingFormat, + Instant.now(), + callerName, + calleeName, + success ? "Succeeded" : "Failed", + latency, + aggregateLatency, + aggregateCount, + res == null ? "" : (" " + res.toKvpString())); + + offerToQueue(trackerStart, latencyDetails); + } + + public String getClientLatency() { + if (!enabled) return null; + + Instant trackerStart = Instant.now(); + String latencyDetails = Q.poll(); // non-blocking pop + + if(LOG.isDebugEnabled()) { + Instant stop = Instant.now(); + long elapsed = Duration.between(trackerStart, stop).toMillis(); + LOG.debug(String.format("Dequeued latency info [%s ms]: %s", elapsed, latencyDetails)); + } + + return latencyDetails; + } + + public Instant getLatencyInstant() { + if (!enabled) return null; + return Instant.now(); + } + + private void offerToQueue(Instant trackerStart, String latencyDetails) { + Q.offer(latencyDetails); // non-blocking append + + if(LOG.isDebugEnabled()) { + Instant trackerStop = Instant.now(); + long elapsed = Duration.between(trackerStart, trackerStop).toMillis(); + LOG.debug(String.format("Queued latency info [%s ms]: %s", elapsed, latencyDetails)); + } + } + + private boolean isValidInstant(Instant testInstant) { + return testInstant != null && testInstant != Instant.MIN && testInstant != Instant.MAX; + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java index 228e385410a7b..7df9fb1a35289 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java @@ -43,7 +43,7 @@ private void validateUserAgent(String expectedPattern, AbfsConfiguration config, boolean includeSSLProvider) { AbfsClient client = new AbfsClient(baseUrl, null, - config, null, null); + config, null, null, null); String sslProviderName = null; if (includeSSLProvider) { sslProviderName = DelegatingSSLSocketFactory.getDefaultFactory().getProviderName(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java new file mode 100644 index 0000000000000..4f3d17fa10980 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java @@ -0,0 +1,473 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URL; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.regex.*; + +/** + * Test the latency tracker for abfs + * + */ +public final class TestLatencyTracker { + private static final Logger LOG = LoggerFactory.getLogger(TestLatencyTracker.class); + private final String filesystemName = "bogusFilesystemName"; + private final String accountName = "bogusAccountName"; + private final URL url; + + public TestLatencyTracker() throws Exception + { + this.url = new URL("http", "www.microsoft.com", "/bogusFile"); + } + + @Test + public void verifyDisablingOfTracker() throws Exception { + // verify that disabling of the tracker works + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); + + String latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNull("LatencyTracker should be empty", latencyDetails); + + latencyTracker.recordClientLatency(Instant.now(), "disablingCaller", "disablingCallee", true, + new AbfsHttpOperation(url, "GET", new ArrayList())); + + latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNull("LatencyTracker should return no record", latencyDetails); + } + + @Test + public void verifyTrackingForSingletonLatencyRecords() throws Exception { + // verify that tracking for singleton latency records works as expected + final int numTasks = 100; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + + String latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNull("LatencyTracker should be empty", latencyDetails); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Integer call() throws Exception { + latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, httpOperation); + return 0; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + fr.get(); + } + + for(int i=0; i < numTasks; i++) { + latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); + Assert.assertTrue ("Latency record should be in the correct format", Pattern.matches( + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+ s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); + } + } + + @Test + public void verifyTrackingForAggregateLatencyRecords() throws Exception { + // verify that tracking of aggregate latency records works as expected + final int numTasks = 100; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + + String latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNull("LatencyTracker should be empty", latencyDetails); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> 
tasks = new ArrayList>(); + AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Integer call() throws Exception { + // test latency tracking when aggregate latency numbers are also passed + latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), 42, httpOperation); + return 0; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + fr.get(); + } + + for(int i=0; i < numTasks; i++) { + latencyDetails = latencyTracker.getClientLatency(); + Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); + Assert.assertTrue ("Latency record should be in the correct format", Pattern.matches( + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+ ls=[0-9]+ lc=42 s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); + } + } + + @Test + public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final long maxLatencyWhenDisabledMs = 1; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startRecord = Instant.now(); + + try{ + ; // placeholder try block + } finally { + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, httpOperation); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final long maxLatencyWhenDisabledMs = 1; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startRecord = Instant.now(); + + try { + // placeholder try block + } finally 
{ + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 42, httpOperation); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final long maxLatencyWhenDisabledMs = 1; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startGet = Instant.now(); + latencyTracker.getClientLatency(); + long latencyGet = Duration.between(startGet, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in retrieving latency record.", latencyGet); + return latencyGet; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for getting latency records, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception { + final long maxLatencyWhenDisabledMs = 50; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startRecord = Instant.now(); + + try { + // placeholder try block + } finally { + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, httpOperation); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for recording singleton 
latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception { + final long maxLatencyWhenDisabledMs = 50; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startRecord = Instant.now(); + + try { + // placeholder try block + } finally { + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 42, httpOperation); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { + final long maxLatencyWhenDisabledMs = 50; + final long minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + + ExecutorService executorService = Executors.newCachedThreadPool(); + List> tasks = new ArrayList>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + for(int i=0; i < numTasks; i++) { + Callable c = new Callable() { + @Override + public Long call() throws Exception { + Instant startRecord = Instant.now(); + latencyTracker.getClientLatency(); + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + } + }; + tasks.add(c); + } + + for(Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); + } + + @Test + public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { + Instant testInstant = Instant.now(); + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + try { + latencyTracker.recordClientLatency(null, null, null, false, null); + 
latencyTracker.recordClientLatency(Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, httpOperation); + + latencyTracker.recordClientLatency(null, null, null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, httpOperation); + + latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null); + + latencyTracker.recordClientLatency(null, null, null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0,null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + + latencyTracker.recordClientLatency(null, null, null, null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0,null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0,null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + + latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null, 0, null); + + + } catch (Exception e) { + Assert.assertTrue("There should be no exception", false); + } + } + + @Test + public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { + Instant testInstant = Instant.now(); + LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + try { + latencyTracker.recordClientLatency(null, null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, httpOperation); + 
+ latencyTracker.recordClientLatency(null, null, null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, httpOperation); + + latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null); + + latencyTracker.recordClientLatency(null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + + latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + + latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null, 0, null); + } catch (Exception e){ + Assert.assertTrue("There should be no exception", false); + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml index 24d444a88d743..d833cfbe017fa 100644 --- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml @@ -33,6 +33,11 @@ false + + fs.azure.abfs.latency.track + false + + From f6e7183c40c939b15b92c8215a20167f7ada1028 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Wed, 9 Oct 2019 16:51:06 -0700 Subject: [PATCH 02/13] fix checkstyle issues --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 2 +- .../fs/azurebfs/services/AbfsClient.java | 6 +- .../fs/azurebfs/services/AbfsInputStream.java | 4 +- .../azurebfs/services/AbfsOutputStream.java | 8 +- .../azurebfs/services/AbfsRestOperation.java | 4 +- .../fs/azurebfs/services/LatencyTracker.java | 50 ++++---- .../azurebfs/services/TestLatencyTracker.java | 107 +++++++++--------- 7 files changed, 101 insertions(+), 80 deletions(-) diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index e1143419cce53..7e73592767a94 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -593,7 +593,7 @@ public void rename(final Path source, final Path destination) throws countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); } finally { - if(shouldContinue) { + if (shouldContinue) { latencyTracker.recordClientLatency(start, "rename", "renamePath", success, res); } else { latencyTracker.recordClientLatency(start, "rename", "renamePath", success, startAggregate, countAggregate, res); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index ce107cb4fb307..77cc34204147a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -60,7 +60,7 @@ public class AbfsClient implements Closeable { private final String filesystem; private final AbfsConfiguration abfsConfiguration; private final String userAgent; - protected final LatencyTracker latencyTracker; + private final LatencyTracker latencyTracker; private final AccessTokenProvider tokenProvider; @@ -104,6 +104,10 @@ public String getFileSystem() { return filesystem; } + protected LatencyTracker getLatencyTracker() { + return latencyTracker; + } + ExponentialRetryPolicy getRetryPolicy() { return retryPolicy; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index ee003a2be7cf9..1dfe4e7f74bae 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -227,7 +227,7 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); } final AbfsRestOperation op; - final Instant start = client.latencyTracker.getLatencyInstant(); + final Instant start = client.getLatencyTracker().getLatencyInstant(); boolean success = false; AbfsHttpOperation res = null; try { @@ -243,7 +243,7 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti } throw new IOException(ex); } finally { - client.latencyTracker.recordClientLatency(start, "readRemote", "read", success, res); + client.getLatencyTracker().recordClientLatency(start, "readRemote", "read", success, res); } long bytesRead = op.getResult().getBytesReceived(); if (bytesRead > Integer.MAX_VALUE) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index c2703d78ab274..3f459a09832c4 100644 --- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -290,7 +290,7 @@ private synchronized void writeCurrentBufferToService() throws IOException { final Future job = completionService.submit(new Callable() { @Override public Void call() throws Exception { - final Instant start = client.latencyTracker.getLatencyInstant(); + final Instant start = client.getLatencyTracker().getLatencyInstant(); boolean success = false; AbfsHttpOperation res = null; try { @@ -300,7 +300,7 @@ public Void call() throws Exception { success = true; return null; } finally { - client.latencyTracker.recordClientLatency(start, "writeCurrentBufferToService", "append", success, res); + client.getLatencyTracker().recordClientLatency(start, "writeCurrentBufferToService", "append", success, res); } } }); @@ -343,7 +343,7 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { - final Instant start = client.latencyTracker.getLatencyInstant(); + final Instant start = client.getLatencyTracker().getLatencyInstant(); boolean success = false; AbfsHttpOperation res = null; @@ -358,7 +358,7 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset, } throw new IOException(ex); } finally { - client.latencyTracker.recordClientLatency(start, "flushWrittenBytesToServiceInternal", "flush", success, res); + client.getLatencyTracker().recordClientLatency(start, "flushWrittenBytesToServiceInternal", "flush", success, res); } this.lastFlushOffset = offset; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index cfecaac410e2d..44deb8e013319 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -122,9 +122,9 @@ public AbfsHttpOperation getResult() { */ void execute() throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests - String latencyHeader = this.client.latencyTracker.getClientLatency(); + String latencyHeader = this.client.getLatencyTracker().getClientLatency(); - if(latencyHeader != null && !latencyHeader.isEmpty()) { + if (latencyHeader != null && !latencyHeader.isEmpty()) { requestHeaders.add(new AbfsHttpHeader("x-ms-abfs-client-latency", latencyHeader)); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java index 0d2978abfe865..d34768d220b74 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java @@ -26,7 +26,6 @@ import java.net.UnknownHostException; import java.time.Duration; import java.time.Instant; -import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -42,7 +41,7 @@ public class LatencyTracker { private static final 
Logger LOG = LoggerFactory.getLogger(LatencyTracker.class); // the queue to hold latency information - private final ConcurrentLinkedQueue Q = new ConcurrentLinkedQueue(); + private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue(); // whether the latency tracker has been enabled private boolean enabled = false; @@ -73,7 +72,7 @@ protected LatencyTracker(String filesystemName, String accountName, boolean enab LOG.debug("LatencyTracker configuration: {}", enabled); - if(enabled) { + if (enabled) { try { hostName = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException e) { @@ -91,7 +90,9 @@ public void recordClientLatency( String calleeName, boolean success, AbfsHttpOperation res) { - if(!enabled) return; + if (!enabled) { + return; + } Instant operationStop = getLatencyInstant(); @@ -105,11 +106,13 @@ public void recordClientLatency( String calleeName, boolean success, AbfsHttpOperation res) { - if(!enabled) return; + if (!enabled) { + return; + } Instant trackerStart = Instant.now(); - long latency = isValidInstant(operationStart) && isValidInstant(operationStop) ? - Duration.between(operationStart, operationStop).toMillis() : -1; + long latency = isValidInstant(operationStart) && isValidInstant(operationStop) + ? Duration.between(operationStart, operationStop).toMillis() : -1; String latencyDetails = String.format(singletonLatencyReportingFormat, Instant.now(), @@ -130,7 +133,9 @@ public void recordClientLatency( Instant aggregateStart, long aggregateCount, AbfsHttpOperation res) { - if(!enabled) return; + if (!enabled) { + return; + } Instant operationStop = getLatencyInstant(); @@ -146,13 +151,15 @@ public void recordClientLatency( Instant aggregateStart, long aggregateCount, AbfsHttpOperation res){ - if(!enabled) return; + if (!enabled) { + return; + } Instant trackerStart = Instant.now(); - long latency = isValidInstant(operationStart) && isValidInstant(operationStop) ? - Duration.between(operationStart, operationStop).toMillis() : -1; - long aggregateLatency = isValidInstant(aggregateStart) && isValidInstant(operationStop) ? - Duration.between(aggregateStart, operationStop).toMillis() : -1; + long latency = isValidInstant(operationStart) && isValidInstant(operationStop) + ? Duration.between(operationStart, operationStop).toMillis() : -1; + long aggregateLatency = isValidInstant(aggregateStart) && isValidInstant(operationStop) + ? 
Duration.between(aggregateStart, operationStop).toMillis() : -1; String latencyDetails = String.format(aggregateLatencyReportingFormat, Instant.now(), @@ -168,12 +175,14 @@ public void recordClientLatency( } public String getClientLatency() { - if (!enabled) return null; + if (!enabled) { + return null; + } Instant trackerStart = Instant.now(); - String latencyDetails = Q.poll(); // non-blocking pop + String latencyDetails = queue.poll(); // non-blocking pop - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { Instant stop = Instant.now(); long elapsed = Duration.between(trackerStart, stop).toMillis(); LOG.debug(String.format("Dequeued latency info [%s ms]: %s", elapsed, latencyDetails)); @@ -183,14 +192,17 @@ public String getClientLatency() { } public Instant getLatencyInstant() { - if (!enabled) return null; + if (!enabled) { + return null; + } + return Instant.now(); } private void offerToQueue(Instant trackerStart, String latencyDetails) { - Q.offer(latencyDetails); // non-blocking append + queue.offer(latencyDetails); // non-blocking append - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { Instant trackerStop = Instant.now(); long elapsed = Duration.between(trackerStart, trackerStop).toMillis(); LOG.debug(String.format("Queued latency info [%s ms]: %s", elapsed, latencyDetails)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java index 4f3d17fa10980..7ae24059c80af 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs.azurebfs.services; import org.junit.Assert; -import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +32,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.regex.*; +import java.util.regex.Pattern; /** * Test the latency tracker for abfs @@ -45,8 +44,7 @@ public final class TestLatencyTracker { private final String accountName = "bogusAccountName"; private final URL url; - public TestLatencyTracker() throws Exception - { + public TestLatencyTracker() throws Exception { this.url = new URL("http", "www.microsoft.com", "/bogusFile"); } @@ -78,7 +76,7 @@ public void verifyTrackingForSingletonLatencyRecords() throws Exception { List> tasks = new ArrayList>(); AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Integer call() throws Exception { @@ -89,15 +87,16 @@ public Integer call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { fr.get(); } - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { latencyDetails = latencyTracker.getClientLatency(); Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); Assert.assertTrue ("Latency record should be in the correct format", Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+ s=0 e= ci=[^ ]* ri=[^ ]* bs=0 
br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" + + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); } } @@ -114,27 +113,28 @@ public void verifyTrackingForAggregateLatencyRecords() throws Exception { List> tasks = new ArrayList>(); AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Integer call() throws Exception { // test latency tracking when aggregate latency numbers are also passed - latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), 42, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), 123, httpOperation); return 0; } }; tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { fr.get(); } - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { latencyDetails = latencyTracker.getClientLatency(); Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); - Assert.assertTrue ("Latency record should be in the correct format", Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+ ls=[0-9]+ lc=42 s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); + Assert.assertTrue("Latency record should be in the correct format", Pattern.matches( + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" + + " ls=[0-9]+ lc=123 s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); } } @@ -151,14 +151,13 @@ public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exceptio List> tasks = new ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { Instant startRecord = Instant.now(); try{ - ; // placeholder try block } finally { latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, httpOperation); } @@ -171,12 +170,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -193,7 +193,7 @@ public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exceptio List> tasks = new 
ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { @@ -202,7 +202,7 @@ public Long call() throws Exception { try { // placeholder try block } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 42, httpOperation); + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 123, httpOperation); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -213,12 +213,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -235,7 +236,7 @@ public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { List> tasks = new ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { @@ -249,12 +250,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for getting latency records, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for getting latency records, %s ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -270,7 +272,7 @@ public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception List> tasks = new ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { @@ -290,12 +292,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s 
ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -311,7 +314,7 @@ public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception List> tasks = new ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { @@ -320,7 +323,7 @@ public Long call() throws Exception { try { // placeholder try block } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 42, httpOperation); + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 123, httpOperation); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -331,12 +334,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -352,7 +356,7 @@ public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { List> tasks = new ArrayList>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - for(int i=0; i < numTasks; i++) { + for (int i=0; i < numTasks; i++) { Callable c = new Callable() { @Override public Long call() throws Exception { @@ -366,12 +370,13 @@ public Long call() throws Exception { tasks.add(c); } - for(Future fr: executorService.invokeAll(tasks)) { + for (Future fr: executorService.invokeAll(tasks)) { aggregateLatency += fr.get(); } double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), + Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", + averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); } @@ -399,20 +404,20 @@ public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null); latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null); - latencyTracker.recordClientLatency(null, null, null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), 
"test", "test", false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0,null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); - - latencyTracker.recordClientLatency(null, null, null, null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0,null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0,null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + latencyTracker.recordClientLatency(null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); + + latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); @@ -453,7 +458,7 @@ public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); @@ -461,7 +466,7 @@ public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); 
latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 42, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); From 266635e108b478fcd876707c6b703e0576d3aaea Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Fri, 11 Oct 2019 13:28:35 -0700 Subject: [PATCH 03/13] fix more checkstyle issues --- .../azurebfs/services/TestLatencyTracker.java | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java index 7ae24059c80af..74d4f64df1ecd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java @@ -40,6 +40,7 @@ */ public final class TestLatencyTracker { private static final Logger LOG = LoggerFactory.getLogger(TestLatencyTracker.class); + private static final int TEST_AGGREGATE_LATENCY = 42; private final String filesystemName = "bogusFilesystemName"; private final String accountName = "bogusAccountName"; private final URL url; @@ -94,7 +95,7 @@ public Integer call() throws Exception { for (int i=0; i < numTasks; i++) { latencyDetails = latencyTracker.getClientLatency(); Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); - Assert.assertTrue ("Latency record should be in the correct format", Pattern.matches( + Assert.assertTrue("Latency record should be in the correct format", Pattern.matches( "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); } @@ -118,7 +119,7 @@ public void verifyTrackingForAggregateLatencyRecords() throws Exception { @Override public Integer call() throws Exception { // test latency tracking when aggregate latency numbers are also passed - latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), 123, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); return 0; } }; @@ -134,7 +135,7 @@ public Integer call() throws Exception { Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); Assert.assertTrue("Latency record should be in the correct format", Pattern.matches( "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" - + " ls=[0-9]+ lc=123 s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); + + " ls=[0-9]+ lc=" + TEST_AGGREGATE_LATENCY + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); } } @@ -202,7 +203,7 @@ public Long call() throws 
Exception { try { // placeholder try block } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 123, httpOperation); + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, TEST_AGGREGATE_LATENCY, httpOperation); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -323,7 +324,7 @@ public Long call() throws Exception { try { // placeholder try block } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, 123, httpOperation); + latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, TEST_AGGREGATE_LATENCY, httpOperation); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -409,7 +410,7 @@ public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); @@ -417,7 +418,7 @@ public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); @@ -458,7 +459,7 @@ public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); @@ -466,7 +467,7 @@ public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); 
latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 123, httpOperation); + latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); From e7502e3f491bce0c1662d4bbd7a54f0b3f9dfa00 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Thu, 24 Oct 2019 23:19:56 -0700 Subject: [PATCH 04/13] use try-with-resources to reduce the verbosity --- hadoop-tools/hadoop-azure/pom.xml | 5 + .../fs/azurebfs/AzureBlobFileSystemStore.java | 434 +++++----------- .../fs/azurebfs/services/AbfsClient.java | 10 +- .../azurebfs/services/AbfsHttpOperation.java | 49 +- .../fs/azurebfs/services/AbfsInputStream.java | 11 +- .../azurebfs/services/AbfsOutputStream.java | 25 +- .../fs/azurebfs/services/AbfsPerfInfo.java | 132 +++++ ...tencyTracker.java => AbfsPerfTracker.java} | 110 ++-- .../azurebfs/services/AbfsRestOperation.java | 2 +- .../services/TestAbfsPerfTracker.java | 399 +++++++++++++++ .../azurebfs/services/TestLatencyTracker.java | 479 ------------------ 11 files changed, 734 insertions(+), 922 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java rename hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/{LatencyTracker.java => AbfsPerfTracker.java} (69%) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java delete mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 1a4250f6667ba..f2af5a97c1f42 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -278,6 +278,11 @@ bcpkix-jdk15on test + + org.assertj + assertj-core + test + diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 7e73592767a94..8102926f91792 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -73,17 +73,8 @@ import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; -import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper; -import org.apache.hadoop.fs.azurebfs.services.AbfsClient; -import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; -import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; -import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; -import org.apache.hadoop.fs.azurebfs.services.AbfsPermission; -import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; -import org.apache.hadoop.fs.azurebfs.services.AuthType; -import 
org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; -import org.apache.hadoop.fs.azurebfs.services.LatencyTracker; -import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials; +import org.apache.hadoop.fs.azurebfs.services.*; +import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker; import org.apache.hadoop.fs.azurebfs.utils.Base64; import org.apache.hadoop.fs.azurebfs.utils.CRC64; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; @@ -124,6 +115,7 @@ public class AzureBlobFileSystemStore implements Closeable { private static final String TOKEN_DATE_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"; private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1"; private static final int LIST_MAX_RESULTS = 500; + private static final int GET_SET_AGGREGATE_COUNT = 2; private final AbfsConfiguration abfsConfiguration; private final Set azureAtomicRenameDirSet; @@ -132,7 +124,7 @@ public class AzureBlobFileSystemStore implements Closeable { private final AuthType authType; private final UserGroupInformation userGroupInformation; private final IdentityTransformer identityTransformer; - private final LatencyTracker latencyTracker; + private final AbfsPerfTracker abfsPerfTracker; public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration) throws IOException { @@ -165,7 +157,7 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration c this.authType = abfsConfiguration.getAuthType(accountName); boolean usingOauth = (authType == AuthType.OAuth); boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? true : isSecureScheme; - this.latencyTracker = new LatencyTracker(fileSystemName, accountName, this.abfsConfiguration); + this.abfsPerfTracker = new AbfsPerfTracker(fileSystemName, accountName, this.abfsConfiguration); initializeClient(uri, fileSystemName, accountName, useHttps); this.identityTransformer = new IdentityTransformer(abfsConfiguration.getRawConfiguration()); } @@ -215,16 +207,13 @@ private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, In public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { if (!isNamespaceEnabledSet) { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; LOG.debug("Get root ACL status"); - try { + try (AbfsPerfInfo tracker = startTracking("getIsNamespaceEnabled", "getAclStatus")) { AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); - res = op.getResult(); + tracker.registerResult(op.getResult()); isNamespaceEnabled = true; - success = true; + tracker.registerSuccess(true); } catch (AbfsRestOperationException ex) { // Get ACL status is a HEAD request, its response doesn't contain errorCode // So can only rely on its status code to determine its account type. 
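
The conversion above replaces the manual start/success/result bookkeeping with the AutoCloseable AbfsPerfInfo added later in this patch, so the latency record is emitted when the try-with-resources block closes, on both the normal and the exception path. A minimal sketch of the pattern, assuming the AzureBlobFileSystemStore fields and helpers as they appear in this patch (abfsPerfTracker, client, getRelativePath); the method name and the "exampleCaller" label are hypothetical, not taken from the patch:

    // Hypothetical helper inside AzureBlobFileSystemStore; "exampleCaller" is illustrative.
    private void examplePerfTrackedCall(final Path path) throws AzureBlobFileSystemException {
      try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(abfsPerfTracker, "exampleCaller", "getPathStatus")) {
        // Issue the REST call and attach its outcome to the perf record.
        final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path));
        perfInfo.registerResult(op.getResult()).registerSuccess(true);
      }
      // perfInfo.close() has run here (on both normal and exceptional exit) and has handed the
      // record to the AbfsPerfTracker; an exception thrown before registerSuccess(true) is
      // therefore recorded as a failed call.
    }
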
@@ -232,8 +221,6 @@ public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { throw ex; } isNamespaceEnabled = false; - } finally { - latencyTracker.recordClientLatency(start, "getIsNamespaceEnabled", "getAclStatus", success, res); } isNamespaceEnabledSet = true; } @@ -277,37 +264,27 @@ public AbfsConfiguration getAbfsConfiguration() { } public Hashtable getFilesystemProperties() throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("getFilesystemProperties", "getFilesystemProperties")) { LOG.debug("getFilesystemProperties for filesystem: {}", client.getFileSystem()); final Hashtable parsedXmsProperties; final AbfsRestOperation op = client.getFilesystemProperties(); - res = op.getResult(); + tracker.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); - success = true; + tracker.registerSuccess(true); return parsedXmsProperties; - } finally { - latencyTracker.recordClientLatency(start, "getFilesystemProperties", "getFilesystemProperties", success, res); } } public void setFilesystemProperties(final Hashtable properties) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("setFilesystemProperties", "setFilesystemProperties")) { if (properties == null || properties.isEmpty()) { return; } @@ -324,46 +301,32 @@ public void setFilesystemProperties(final Hashtable properties) } final AbfsRestOperation op = client.setFilesystemProperties(commaSeparatedProperties); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "setFilesystemProperties", "setFilesystemProperties", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public Hashtable getPathStatus(final Path path) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("getPathStatus", "getPathStatus")){ LOG.debug("getPathStatus for filesystem: {} path: {}", client.getFileSystem(), path); final Hashtable parsedXmsProperties; final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - res = op.getResult(); + tracker.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); - success = true; + tracker.registerSuccess(true); return parsedXmsProperties; - } finally { - latencyTracker.recordClientLatency(start, "getPathStatus", "getPathStatus", success, res); } } public void setPathProperties(final Path path, final Hashtable properties) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("setPathProperties", "setPathProperties")){ LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}", client.getFileSystem(), path, @@ -376,57 +339,33 @@ public void 
setPathProperties(final Path path, final Hashtable p throw new InvalidAbfsRestOperationException(ex); } final AbfsRestOperation op = client.setPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), commaSeparatedProperties); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "setPathProperties", "setPathProperties", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public void createFilesystem() throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("createFilesystem", "createFilesystem")){ LOG.debug("createFilesystem for filesystem: {}", client.getFileSystem()); final AbfsRestOperation op = client.createFilesystem(); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "createFilesystem", "createFilesystem", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public void deleteFilesystem() throws AzureBlobFileSystemException { - Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("deleteFilesystem", "deleteFilesystem")) { LOG.debug("deleteFilesystem for filesystem: {}", client.getFileSystem()); final AbfsRestOperation op = client.deleteFilesystem(); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "deleteFilesystem", "deleteFilesystem", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public OutputStream createFile(final Path path, final boolean overwrite, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("createFile", "createPath")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -439,9 +378,7 @@ public OutputStream createFile(final Path path, final boolean overwrite, final F final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? 
getOctalNotation(umask) : null); - res = op.getResult(); - - success = true; + tracker.registerResult(op.getResult()).registerSuccess(true); return new AbfsOutputStream( client, @@ -450,18 +387,12 @@ public OutputStream createFile(final Path path, final boolean overwrite, final F abfsConfiguration.getWriteBufferSize(), abfsConfiguration.isFlushEnabled(), abfsConfiguration.isOutputStreamFlushDisabled()); - } finally { - latencyTracker.recordClientLatency(start, "createFile", "createPath", success, res); } } public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("createDirectory", "createPath")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -473,27 +404,19 @@ public void createDirectory(final Path path, final FsPermission permission, fina final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), false, true, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? getOctalNotation(umask) : null); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "createDirectory", "createPath", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statistics statistics) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("openFileForRead", "getPathStatus")) { LOG.debug("openFileForRead filesystem: {} path: {}", client.getFileSystem(), path); final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - res = op.getResult(); + tracker.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); final long contentLength = Long.parseLong(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); @@ -507,32 +430,26 @@ public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statist null); } - success = true; + tracker.registerSuccess(true); // Add statistics for InputStream return new AbfsInputStream(client, statistics, AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength, abfsConfiguration.getReadBufferSize(), abfsConfiguration.getReadAheadQueueDepth(), abfsConfiguration.getTolerateOobAppends(), eTag); - } finally { - latencyTracker.recordClientLatency(start, "openFileForRead", "getPathStatus", success, res); } } public OutputStream openFileForWrite(final Path path, final boolean overwrite) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("openFileForWrite", "getPathStatus")) { LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", client.getFileSystem(), path, overwrite); final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - res = op.getResult(); + 
tracker.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); final Long contentLength = Long.valueOf(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); @@ -547,7 +464,7 @@ public OutputStream openFileForWrite(final Path path, final boolean overwrite) t final long offset = overwrite ? 0 : contentLength; - success = true; + tracker.registerSuccess(true); return new AbfsOutputStream( client, @@ -556,16 +473,14 @@ public OutputStream openFileForWrite(final Path path, final boolean overwrite) t abfsConfiguration.getWriteBufferSize(), abfsConfiguration.isFlushEnabled(), abfsConfiguration.isOutputStreamFlushDisabled()); - } finally { - latencyTracker.recordClientLatency(start, "openFileForWrite", "getPathStatus", success, res); } } public void rename(final Path source, final Path destination) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); + final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; - boolean shouldContinue = true; + boolean shouldContinue; if (isAtomicRenameKey(source.getName())) { LOG.warn("The atomic rename feature is not supported by the ABFS scheme; however rename," @@ -580,23 +495,17 @@ public void rename(final Path source, final Path destination) throws String continuation = null; do { - Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("rename", "renamePath")) { AbfsRestOperation op = client.renamePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(source), AbfsHttpConstants.FORWARD_SLASH + getRelativePath(destination), continuation); - res = op.getResult(); + tracker.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); - success = true; + tracker.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); - } finally { - if (shouldContinue) { - latencyTracker.recordClientLatency(start, "rename", "renamePath", success, res); - } else { - latencyTracker.recordClientLatency(start, "rename", "renamePath", success, startAggregate, countAggregate, res); + + if (!shouldContinue) { + tracker.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); @@ -604,7 +513,7 @@ public void rename(final Path source, final Path destination) throws public void delete(final Path path, final boolean recursive) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); + final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; boolean shouldContinue = true; @@ -616,35 +525,24 @@ public void delete(final Path path, final boolean recursive) String continuation = null; do { - Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("delete", "deletePath")) { AbfsRestOperation op = client.deletePath( AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation); - res = op.getResult(); + tracker.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); - success = true; + tracker.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && 
!continuation.isEmpty(); - } finally { - if (shouldContinue) { - latencyTracker.recordClientLatency(start, "delete", "deletePath", success, res); - } else { - latencyTracker.recordClientLatency(start, "delete", "deletePath", success, startAggregate, countAggregate, res); + + if (!shouldContinue) { + tracker.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); } public FileStatus getFileStatus(final Path path) throws IOException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - String calleeName = null; - - try { + try (AbfsPerfInfo tracker = startTracking("getFileStatus", "undetermined")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("getFileStatus filesystem: {} path: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -654,18 +552,18 @@ public FileStatus getFileStatus(final Path path) throws IOException { final AbfsRestOperation op; if (path.isRoot()) { if (isNamespaceEnabled) { - calleeName = "getAclStatus"; + tracker.registerCallee("getAclStatus"); op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); } else { - calleeName = "getFilesystemProperties"; + tracker.registerCallee("getFilesystemProperties"); op = client.getFilesystemProperties(); } } else { - calleeName = "getPathStatus"; + tracker.registerCallee("getPathStatus"); op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); } - res = op.getResult(); + tracker.registerResult(op.getResult()); final long blockSize = abfsConfiguration.getAzureBlockSize(); final AbfsHttpOperation result = op.getResult(); @@ -694,7 +592,7 @@ public FileStatus getFileStatus(final Path path) throws IOException { false, primaryUserGroup); - success = true; + tracker.registerSuccess(true); return new VersionedFileStatus( transformedOwner, @@ -709,8 +607,6 @@ public FileStatus getFileStatus(final Path path) throws IOException { parseLastModifiedTime(lastModified), path, eTag); - } finally { - latencyTracker.recordClientLatency(start, "getFileStatus", calleeName, success, res); } } @@ -735,7 +631,7 @@ public FileStatus[] listStatus(final Path path) throws IOException { * */ @InterfaceStability.Unstable public FileStatus[] listStatus(final Path path, final String startFrom) throws IOException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); + final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; boolean shouldContinue = true; @@ -756,13 +652,9 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I ArrayList fileStatuses = new ArrayList<>(); do { - Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("listStatus", "listPath")) { AbfsRestOperation op = client.listPath(relativePath, false, LIST_MAX_RESULTS, continuation); - res = op.getResult(); + tracker.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); ListResultSchema retrievedSchema = op.getResult().getListResultSchema(); if (retrievedSchema == null) { @@ -808,14 +700,12 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I entry.eTag())); } - success = true; + tracker.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); - } finally { - if (shouldContinue) { - 
latencyTracker.recordClientLatency(start, "listStatus", "listPath", success, res); - } else { - latencyTracker.recordClientLatency(start, "listStatus", "listPath", success, startAggregate, countAggregate, res); + + if (!shouldContinue) { + tracker.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); @@ -880,11 +770,7 @@ private String generateContinuationTokenForNonXns(final String path, final Strin public void setOwner(final Path path, final String owner, final String group) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("setOwner", "setOwner")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -902,21 +788,13 @@ public void setOwner(final Path path, final String owner, final String group) th final AbfsRestOperation op = client.setOwner(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), transformedOwner, transformedGroup); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "setOwner", "setOwner", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public void setPermission(final Path path, final FsPermission permission) throws AzureBlobFileSystemException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { + try (AbfsPerfInfo tracker = startTracking("setPermission", "setPermission")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -930,26 +808,13 @@ public void setPermission(final Path path, final FsPermission permission) throws final AbfsRestOperation op = client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); - res = op.getResult(); - - success = true; - } finally { - latencyTracker.recordClientLatency(start, "setPermission", "setPermission", success, res); + tracker.registerResult(op.getResult()).registerSuccess(true); } } public void modifyAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); - long countAggregate = 0; - Instant startSet = null; - - boolean successGet = false; - boolean successSet = false; - AbfsHttpOperation resultGet = null; - AbfsHttpOperation resultSet = null; - - try { + try (AbfsPerfInfo trackerGet = startTracking("modifyAclEntries", "getAclStatus")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -966,40 +831,25 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), useUpn); - resultGet = op.getResult(); + trackerGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries); - successGet = true; - countAggregate++; - startSet = latencyTracker.getLatencyInstant(); + trackerGet.registerSuccess(true).finishTracking(); - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - resultSet = setAclOp.getResult(); - - successSet = true; - countAggregate++; - } finally { - latencyTracker.recordClientLatency(startAggregate, startSet, "modifyAclEntries", "getAclStatus", successGet, resultGet); - latencyTracker.recordClientLatency(startSet, "modifyAclEntries", "setAcl", successSet, startAggregate, countAggregate, resultSet); + try (AbfsPerfInfo trackerSet = startTracking("modifyAclEntries", "setAcl")) { + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + } } } public void removeAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); - long countAggregate = 0; - Instant startSet = null; - - boolean successGet = false; - boolean successSet = false; - AbfsHttpOperation resultGet = null; - AbfsHttpOperation resultSet = null; - - try { + try (AbfsPerfInfo trackerGet = startTracking("removeAclEntries", "getAclStatus")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -1016,40 +866,25 @@ public void removeAclEntries(final Path path, final List aclSpec) thro boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - resultGet = op.getResult(); + trackerGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.removeAclEntriesInternal(aclEntries, removeAclEntries); - successGet = true; - countAggregate++; - startSet = latencyTracker.getLatencyInstant(); + trackerGet.registerSuccess(true).finishTracking(); - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - resultSet = setAclOp.getResult(); - - successSet = true; - countAggregate++; - } finally { - latencyTracker.recordClientLatency(startAggregate, startSet, "removeAclEntries", "getAclStatus", successGet, resultGet); - latencyTracker.recordClientLatency(startSet, "removeAclEntries", "setAcl", successSet, startAggregate, countAggregate, resultSet); + try (AbfsPerfInfo trackerSet = startTracking("removeAclEntries", "setAcl")) { + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), 
GET_SET_AGGREGATE_COUNT); + } } } public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); - long countAggregate = 0; - Instant startSet = null; - - boolean successGet = false; - boolean successSet = false; - AbfsHttpOperation resultGet = null; - AbfsHttpOperation resultSet = null; - - try { + try (AbfsPerfInfo trackerGet = startTracking("removeDefaultAcl", "getAclStatus")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -1061,7 +896,7 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio path.toString()); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - resultGet = op.getResult(); + trackerGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); final Map defaultAclEntries = new HashMap<>(); @@ -1074,33 +909,18 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio aclEntries.keySet().removeAll(defaultAclEntries.keySet()); - successGet = true; - countAggregate++; - startSet = latencyTracker.getLatencyInstant(); - - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - resultSet = setAclOp.getResult(); + trackerGet.registerSuccess(true).finishTracking(); - successSet = true; - countAggregate++; - } finally { - latencyTracker.recordClientLatency(startAggregate, startSet, "removeDefaultAcl", "getAclStatus", successGet, resultGet); - latencyTracker.recordClientLatency(startSet, "removeDefaultAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + try (AbfsPerfInfo trackerSet = startTracking("removeDefaultAcl", "setAcl")) { + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + } } } public void removeAcl(final Path path) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); - long countAggregate = 0; - Instant startSet = null; - - boolean successGet = false; - boolean successSet = false; - AbfsHttpOperation resultGet = null; - AbfsHttpOperation resultSet = null; - - try { + try (AbfsPerfInfo trackerGet = startTracking("removeAcl", "getAclStatus")){ if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -1111,7 +931,7 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { client.getFileSystem(), path.toString()); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - resultGet = op.getResult(); + trackerGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); @@ -1121,34 +941,18 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { newAclEntries.put(AbfsHttpConstants.ACCESS_GROUP, aclEntries.get(AbfsHttpConstants.ACCESS_GROUP)); newAclEntries.put(AbfsHttpConstants.ACCESS_OTHER, aclEntries.get(AbfsHttpConstants.ACCESS_OTHER)); - successGet = true; - countAggregate++; - startSet = latencyTracker.getLatencyInstant(); - - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); - - resultSet = setAclOp.getResult(); + trackerGet.registerSuccess(true).finishTracking(); - successSet = true; - countAggregate++; - } finally { - latencyTracker.recordClientLatency(startAggregate, startSet, "removeAcl", "getAclStatus", successGet, resultGet); - latencyTracker.recordClientLatency(startSet, "removeAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + try (AbfsPerfInfo trackerSet = startTracking("removeAcl", "setAcl")) { + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); + trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + } } } public void setAcl(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - final Instant startAggregate = latencyTracker.getLatencyInstant(); - long countAggregate = 0; - Instant startSet = null; - - boolean successGet = false; - boolean successSet = false; - AbfsHttpOperation resultGet = null; - AbfsHttpOperation resultSet = null; - - try { + try (AbfsPerfInfo trackerGet = startTracking("setAcl", "getAclStatus")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -1165,35 +969,25 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - resultGet = op.getResult(); + trackerGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map getAclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries); - startSet = latencyTracker.getLatencyInstant(); - successGet = true; - countAggregate++; - - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), - AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - resultSet = setAclOp.getResult(); + trackerGet.registerSuccess(true).finishTracking(); - successSet = true; - countAggregate++; - } finally { - latencyTracker.recordClientLatency(startAggregate, startSet, "setAcl", "getAclStatus", successGet, resultGet); - latencyTracker.recordClientLatency(startSet, "setAcl", "setAcl", successSet, startAggregate, countAggregate, resultSet); + try (AbfsPerfInfo trackerSet = startTracking("setAcl", "setAcl")) { + final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + 
AbfsAclHelper.serializeAclSpec(aclEntries), eTag); + trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + } } } public AclStatus getAclStatus(final Path path) throws IOException { - final Instant start = latencyTracker.getLatencyInstant(); - boolean success = false; - AbfsHttpOperation result = null; - - try { + try (AbfsPerfInfo tracker = startTracking("getAclStatus", "getAclStatus")) { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( "This operation is only valid for storage accounts with the hierarchical namespace enabled."); @@ -1204,18 +998,18 @@ public AclStatus getAclStatus(final Path path) throws IOException { client.getFileSystem(), path.toString()); AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - result = op.getResult(); + tracker.registerResult(op.getResult()); final String transformedOwner = identityTransformer.transformIdentityForGetRequest( - result.getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), + op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), true, userName); final String transformedGroup = identityTransformer.transformIdentityForGetRequest( - result.getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), + op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), false, primaryUserGroup); - final String permissions = result.getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); + final String permissions = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); final String aclSpecString = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL); final List aclEntries = AclEntry.parseAclSpec(AbfsAclHelper.processAclString(aclSpecString), true); @@ -1230,10 +1024,8 @@ public AclStatus getAclStatus(final Path path) throws IOException { aclStatusBuilder.setPermission(fsPermission); aclStatusBuilder.stickyBit(fsPermission.getStickyBit()); aclStatusBuilder.addEntries(aclEntries); - success = true; + tracker.registerSuccess(true); return aclStatusBuilder.build(); - } finally { - latencyTracker.recordClientLatency(start, "getAclStatus", "getAclStatus", success, result); } } @@ -1275,7 +1067,7 @@ private void initializeClient(URI uri, String fileSystemName, String accountName abfsConfiguration.getRawConfiguration()); } - this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(), tokenProvider, latencyTracker); + this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy(), tokenProvider, abfsPerfTracker); } private String getOctalNotation(FsPermission fsPermission) { @@ -1416,6 +1208,10 @@ private boolean isKeyForDirectorySet(String key, Set dirSet) { return false; } + private AbfsPerfInfo startTracking(String callerName, String calleeName) { + return new AbfsPerfInfo(abfsPerfTracker, callerName, calleeName); + } + private static class VersionedFileStatus extends FileStatus { private final String version; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 77cc34204147a..dea5e94ca8274 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -60,7 
+60,7 @@ public class AbfsClient implements Closeable { private final String filesystem; private final AbfsConfiguration abfsConfiguration; private final String userAgent; - private final LatencyTracker latencyTracker; + private final AbfsPerfTracker abfsPerfTracker; private final AccessTokenProvider tokenProvider; @@ -69,7 +69,7 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent final AbfsConfiguration abfsConfiguration, final ExponentialRetryPolicy exponentialRetryPolicy, final AccessTokenProvider tokenProvider, - final LatencyTracker latencyTracker) { + final AbfsPerfTracker abfsPerfTracker) { this.baseUrl = baseUrl; this.sharedKeyCredentials = sharedKeyCredentials; String baseUrlString = baseUrl.toString(); @@ -90,7 +90,7 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent this.userAgent = initializeUserAgent(abfsConfiguration, sslProviderName); this.tokenProvider = tokenProvider; - this.latencyTracker = latencyTracker; + this.abfsPerfTracker = abfsPerfTracker; } @Override @@ -104,8 +104,8 @@ public String getFileSystem() { return filesystem; } - protected LatencyTracker getLatencyTracker() { - return latencyTracker; + protected AbfsPerfTracker getAbfsPerfTracker() { + return abfsPerfTracker; } ExponentialRetryPolicy getRetryPolicy() { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 94cebf0dc541b..3b27e218626de 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -173,32 +173,29 @@ public String toKvpString() { urlStr = "https%3A%2F%2Ffailed%2Fto%2Fencode%2Furl"; } - final StringBuilder sb = new StringBuilder(); - sb.append("s="); - sb.append(statusCode); - sb.append(" e="); - sb.append(storageErrorCode); - sb.append(" ci="); - sb.append(clientRequestId); - sb.append(" ri="); - sb.append(requestId); - if (isTraceEnabled) { - sb.append(" ct="); - sb.append(connectionTimeMs); - sb.append(" st="); - sb.append(sendRequestTimeMs); - sb.append(" rt="); - sb.append(recvResponseTimeMs); - } - sb.append(" bs="); - sb.append(bytesSent); - sb.append(" br="); - sb.append(bytesReceived); - sb.append(" m="); - sb.append(method); - sb.append(" u="); - sb.append(urlStr); - return sb.toString(); + return new StringBuilder() + .append("s=") + .append(statusCode) + .append(" e=") + .append(storageErrorCode) + .append(" ci=") + .append(clientRequestId) + .append(" ri=") + .append(requestId) + .append(" ct=") + .append(connectionTimeMs) + .append(" st=") + .append(sendRequestTimeMs) + .append(" rt=") + .append(recvResponseTimeMs) + .append(" bs=") + .append(bytesSent) + .append(" br=") + .append(bytesReceived) + .append(" m=") + .append(method) + .append(" u=") + .append(urlStr).toString(); } /** diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index 1dfe4e7f74bae..ecccf814cd33c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -22,7 +22,6 @@ import 
java.io.FileNotFoundException; import java.io.IOException; import java.net.HttpURLConnection; -import java.time.Instant; import com.google.common.base.Preconditions; @@ -227,13 +226,9 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); } final AbfsRestOperation op; - final Instant start = client.getLatencyTracker().getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - try { + try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "readRemote", "read")) { op = client.read(path, position, b, offset, length, tolerateOobAppends ? "*" : eTag); - res = op.getResult(); - success = true; + tracker.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { AbfsRestOperationException ere = (AbfsRestOperationException) ex; @@ -242,8 +237,6 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti } } throw new IOException(ex); - } finally { - client.getLatencyTracker().recordClientLatency(start, "readRemote", "read", success, res); } long bytesRead = op.getResult().getBytesReceived(); if (bytesRead > Integer.MAX_VALUE) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 3f459a09832c4..b9bd043b586b2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -23,7 +23,6 @@ import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.HttpURLConnection; -import java.time.Instant; import java.nio.ByteBuffer; import java.util.Locale; import java.util.concurrent.ConcurrentLinkedDeque; @@ -290,17 +289,12 @@ private synchronized void writeCurrentBufferToService() throws IOException { final Future job = completionService.submit(new Callable() { @Override public Void call() throws Exception { - final Instant start = client.getLatencyTracker().getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - try { - res = client.append(path, offset, bytes, 0, - bytesLength).getResult(); + try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "writeCurrentBufferToService", "append")) { + tracker.registerResult(client.append(path, offset, bytes, 0, + bytesLength).getResult()); byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); - success = true; + tracker.registerSuccess(true); return null; - } finally { - client.getLatencyTracker().recordClientLatency(start, "writeCurrentBufferToService", "append", success, res); } } }); @@ -343,13 +337,8 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { - final Instant start = client.getLatencyTracker().getLatencyInstant(); - boolean success = false; - AbfsHttpOperation res = null; - - try { - res = client.flush(path, offset, retainUncommitedData, isClose).getResult(); - success = true; + try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "flushWrittenBytesToServiceInternal", 
"flush")) { + tracker.registerResult(client.flush(path, offset, retainUncommitedData, isClose).getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { if (((AbfsRestOperationException) ex).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { @@ -357,8 +346,6 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset, } } throw new IOException(ex); - } finally { - client.getLatencyTracker().recordClientLatency(start, "flushWrittenBytesToServiceInternal", "flush", success, res); } this.lastFlushOffset = offset; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java new file mode 100644 index 0000000000000..9760c57c691f6 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.time.Instant; + +/** + * {@code AbfsPerfInfo} holds information on ADLS Gen 2 API performance observed by {@code AbfsClient}. Every + * Abfs request keeps adding its information (success/failure, latency etc) to its {@code AbfsPerfInfo}'s object + * as and when it becomes available. When the request is over, the performance information is recorded as + * the {@code AutoCloseable} {@code AbfsPerfInfo} object is "closed". 
+ */ +public final class AbfsPerfInfo implements AutoCloseable { + + // the tracker which will be extracting perf info out of this object + private AbfsPerfTracker abfsPerfTracker; + + // the caller name + private String callerName; + + // the callee name + private String calleeName; + + // time when this tracking started + private Instant trackingStart; + + // time when this tracking ended + private Instant trackingEnd; + + // whether the tracked request was successful + private boolean success; + + // time when the aggregate operation (to which this request belongs) started + private Instant aggregateStart; + + // number of requests in the aggregate operation (to which this request belongs) + private long aggregateCount; + + // result of the request + private AbfsHttpOperation res; + + public AbfsPerfInfo(AbfsPerfTracker abfsPerfTracker, String callerName, String calleeName) { + this.callerName = callerName; + this.calleeName = calleeName; + this.abfsPerfTracker = abfsPerfTracker; + this.success = false; + this.trackingStart = abfsPerfTracker.getLatencyInstant(); + } + + public AbfsPerfInfo registerSuccess(boolean success) { + this.success = success; + return this; + } + + public AbfsPerfInfo registerResult(AbfsHttpOperation res) { + this.res = res; + return this; + } + + public AbfsPerfInfo registerAggregates(Instant aggregateStart, long aggregateCount) { + this.aggregateStart = aggregateStart; + this.aggregateCount = aggregateCount; + return this; + } + + public AbfsPerfInfo finishTracking() + { + if (this.trackingEnd == null) { + this.trackingEnd = abfsPerfTracker.getLatencyInstant(); + } + + return this; + } + + public AbfsPerfInfo registerCallee(String calleeName) { + this.calleeName = calleeName; + return this; + } + + @Override + public void close() { + abfsPerfTracker.trackInfo(this.finishTracking()); + } + + public String getCallerName() { + return callerName; + }; + + public String getCalleeName() { + return calleeName; + } + + public Instant getTrackingStart() { + return trackingStart; + } + + public Instant getTrackingEnd() { + return trackingEnd; + } + + public boolean getSuccess() { + return success; + } + + public Instant getAggregateStart() { + return aggregateStart; + } + + public long getAggregateCount() { + return aggregateCount; + } + + public AbfsHttpOperation getResult() { + return res; + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java similarity index 69% rename from hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java rename to hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index d34768d220b74..68d48de0dc942 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/LatencyTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -18,27 +18,28 @@ package org.apache.hadoop.fs.azurebfs.services; -import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.net.InetAddress; import java.net.UnknownHostException; import java.time.Duration; import java.time.Instant; import java.util.concurrent.ConcurrentLinkedQueue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
org.apache.hadoop.fs.azurebfs.AbfsConfiguration; /** - * {@code LatencyTracker} keeps track of service latencies observed by {@code AbfsClient}. Every request adds - * its information (success/failure, latency etc) to the {@code LatencyTracker}'s queue. - * When a request is made, we check {@code LatencyTracker} to see if there are any latency numbers to be reported. - * If there are any, the stats are added to an HTTP header ({@code x-ms-abfs-client-latency}) on the next request. * + * {@code AbfsPerfTracker} keeps track of service latencies observed by {@code AbfsClient}. Every request hands over + * its perf-related information as a {@code AbfsPerfInfo} object (contains success/failure, latency etc) to the + * {@code AbfsPerfTracker}'s queue. When a request is made, we check {@code AbfsPerfTracker} to see if there are + * any latency numbers to be reported. If there are any, the stats are added to an HTTP header + * ({@code x-ms-abfs-client-latency}) on the next request. */ -public class LatencyTracker { +public final class AbfsPerfTracker { // the logger - private static final Logger LOG = LoggerFactory.getLogger(LatencyTracker.class); + private static final Logger LOG = LoggerFactory.getLogger(AbfsPerfTracker.class); // the queue to hold latency information private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue(); @@ -49,28 +50,20 @@ public class LatencyTracker { // the host name private String hostName; - // the file system name - private String filesystemName; - - // the account name - private String accountName; - // singleton latency reporting format private String singletonLatencyReportingFormat; // aggregate latency reporting format private String aggregateLatencyReportingFormat; - public LatencyTracker(String filesystemName, String accountName, AbfsConfiguration configuration) { + public AbfsPerfTracker(String filesystemName, String accountName, AbfsConfiguration configuration) { this(filesystemName, accountName, configuration.shouldTrackLatency()); } - protected LatencyTracker(String filesystemName, String accountName, boolean enabled) { + protected AbfsPerfTracker(String filesystemName, String accountName, boolean enabled) { this.enabled = enabled; - this.filesystemName = filesystemName; - this.accountName = accountName; - LOG.debug("LatencyTracker configuration: {}", enabled); + LOG.debug("AbfsPerfTracker configuration: {}", enabled); if (enabled) { try { @@ -84,31 +77,48 @@ protected LatencyTracker(String filesystemName, String accountName, boolean enab } } - public void recordClientLatency( - Instant operationStart, - String callerName, - String calleeName, - boolean success, - AbfsHttpOperation res) { + public void trackInfo(AbfsPerfInfo perfInfo) + { if (!enabled) { return; } - Instant operationStop = getLatencyInstant(); + if (isValidInstant(perfInfo.getAggregateStart()) && perfInfo.getAggregateCount() > 0) { + recordClientLatency( + perfInfo.getTrackingStart(), + perfInfo.getTrackingEnd(), + perfInfo.getCallerName(), + perfInfo.getCalleeName(), + perfInfo.getSuccess(), + perfInfo.getAggregateStart(), + perfInfo.getAggregateCount(), + perfInfo.getResult()); + } else { + recordClientLatency( + perfInfo.getTrackingStart(), + perfInfo.getTrackingEnd(), + perfInfo.getCallerName(), + perfInfo.getCalleeName(), + perfInfo.getSuccess(), + perfInfo.getResult()); + } + } - recordClientLatency(operationStart, operationStop, callerName, calleeName, success, res); + public Instant getLatencyInstant() { + if (!enabled) { + return null; + } + + return Instant.now(); } - 
public void recordClientLatency( + private void recordClientLatency( Instant operationStart, Instant operationStop, String callerName, String calleeName, boolean success, AbfsHttpOperation res) { - if (!enabled) { - return; - } Instant trackerStart = Instant.now(); long latency = isValidInstant(operationStart) && isValidInstant(operationStop) @@ -125,24 +135,7 @@ public void recordClientLatency( this.offerToQueue(trackerStart, latencyDetails); } - public void recordClientLatency( - Instant operationStart, - String callerName, - String calleeName, - boolean success, - Instant aggregateStart, - long aggregateCount, - AbfsHttpOperation res) { - if (!enabled) { - return; - } - - Instant operationStop = getLatencyInstant(); - - recordClientLatency(operationStart, operationStop, callerName, calleeName, success, aggregateStart, aggregateCount, res); - } - - public void recordClientLatency( + private void recordClientLatency( Instant operationStart, Instant operationStop, String callerName, @@ -151,9 +144,6 @@ public void recordClientLatency( Instant aggregateStart, long aggregateCount, AbfsHttpOperation res){ - if (!enabled) { - return; - } Instant trackerStart = Instant.now(); long latency = isValidInstant(operationStart) && isValidInstant(operationStop) @@ -185,27 +175,19 @@ public String getClientLatency() { if (LOG.isDebugEnabled()) { Instant stop = Instant.now(); long elapsed = Duration.between(trackerStart, stop).toMillis(); - LOG.debug(String.format("Dequeued latency info [%s ms]: %s", elapsed, latencyDetails)); + LOG.debug("Dequeued latency info [{} ms]: {}", elapsed, latencyDetails); } return latencyDetails; } - public Instant getLatencyInstant() { - if (!enabled) { - return null; - } - - return Instant.now(); - } - private void offerToQueue(Instant trackerStart, String latencyDetails) { queue.offer(latencyDetails); // non-blocking append if (LOG.isDebugEnabled()) { Instant trackerStop = Instant.now(); long elapsed = Duration.between(trackerStart, trackerStop).toMillis(); - LOG.debug(String.format("Queued latency info [%s ms]: %s", elapsed, latencyDetails)); + LOG.debug("Queued latency info [{} ms]: {}", elapsed, latencyDetails); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 44deb8e013319..44b8ee5ecd7ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -122,7 +122,7 @@ public AbfsHttpOperation getResult() { */ void execute() throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests - String latencyHeader = this.client.getLatencyTracker().getClientLatency(); + String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); if (latencyHeader != null && !latencyHeader.isEmpty()) { requestHeaders.add(new AbfsHttpHeader("x-ms-abfs-client-latency", latencyHeader)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java new file mode 100644 index 0000000000000..72689be34edb8 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java @@ -0,0 +1,399 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.net.URL; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.regex.Pattern; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Test the latency tracker for ABFS. + * + */ +public final class TestAbfsPerfTracker { + private static final Logger LOG = LoggerFactory.getLogger(TestAbfsPerfTracker.class); + private static ExecutorService executorService = null; + private static final int TEST_AGGREGATE_COUNT = 42; + private final String filesystemName = "bogusFilesystemName"; + private final String accountName = "bogusAccountName"; + private final URL url; + + public TestAbfsPerfTracker() throws Exception { + this.url = new URL("http", "www.microsoft.com", "/bogusFile"); + } + + @Before + public void setUp() throws Exception { + executorService = Executors.newCachedThreadPool(); + } + + @After + public void tearDown() throws Exception { + executorService.shutdown(); + } + + @Test + public void verifyDisablingOfTracker() throws Exception { + // verify that disabling of the tracker works + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false); + + String latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull(); + + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "disablingCaller", "disablingCallee")) { + tracker.registerResult(new AbfsHttpOperation(url, "GET", new ArrayList<>())).registerSuccess(true); + } + + latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should return no record").isNull(); + } + + @Test + public void verifyTrackingForSingletonLatencyRecords() throws Exception { + // verify that tracking for singleton latency records works as expected + final int numTasks = 100; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); + + String latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull(); + + List> tasks = new ArrayList<>(); + AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + try (AbfsPerfInfo tracker = new 
AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true); + return 0; + } + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + fr.get(); + } + + for (int i=0; i < numTasks; i++) { + latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should return non-null record").isNotNull(); + assertThat(Pattern.matches( + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" + + " s=0 e= ci=[^ ]* ri=[^ ]* ct=[0-9]+ st=[0-9]+ rt=[0-9]+ bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)) + .describedAs("Latency record should be in the correct format").isTrue(); + } + + latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should return no record").isNull(); + } + + @Test + public void verifyTrackingForAggregateLatencyRecords() throws Exception { + // verify that tracking of aggregate latency records works as expected + final int numTasks = 100; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); + + String latencyDetails = abfsPerfTracker.getClientLatency(); + Assert.assertNull("AbfsPerfTracker should be empty", latencyDetails); + + List> tasks = new ArrayList<>(); + AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(Instant.now(), TEST_AGGREGATE_COUNT); + return 0; + } + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + fr.get(); + } + + for (int i=0; i < numTasks; i++) { + latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should return non-null record").isNotNull(); + assertThat(Pattern.matches( + "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" + + " ls=[0-9]+ lc=" + TEST_AGGREGATE_COUNT + " s=0 e= ci=[^ ]* ri=[^ ]* ct=[0-9]+ st=[0-9]+ rt=[0-9]+ bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)) + .describedAs("Latency record should be in the correct format").isTrue(); + } + + latencyDetails = abfsPerfTracker.getClientLatency(); + assertThat(latencyDetails).describedAs("AbfsPerfTracker should return no record").isNull(); + } + + @Test + public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final double maxLatencyWhenDisabledMs = 1; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false); + List> tasks = new ArrayList<>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + Instant startRecord = Instant.now(); + + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true); + } + + long latencyRecord = 
Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for recording singleton latencies should be bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final double maxLatencyWhenDisabledMs = 1; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false); + List> tasks = new ArrayList<>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + Instant startRecord = Instant.now(); + + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(startRecord, TEST_AGGREGATE_COUNT); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for recording aggregate latencies should be bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { + // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value + final double maxLatencyWhenDisabledMs = 1; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false); + List> tasks = new ArrayList<>(); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + Instant startGet = Instant.now(); + abfsPerfTracker.getClientLatency(); + long latencyGet = Duration.between(startGet, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in retrieving latency record.", latencyGet); + return latencyGet; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for getting latency records should be bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception { + final double maxLatencyWhenDisabledMs = 50; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); + List> tasks = new ArrayList<>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() 
-> { + Instant startRecord = Instant.now(); + + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for recording singleton latencies should be bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception { + final double maxLatencyWhenDisabledMs = 50; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); + List> tasks = new ArrayList<>(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + Instant startRecord = Instant.now(); + + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(startRecord, TEST_AGGREGATE_COUNT); + } + + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for recording aggregate latencies is bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { + final double maxLatencyWhenDisabledMs = 50; + final double minLatencyWhenDisabledMs = 0; + final long numTasks = 1000; + long aggregateLatency = 0; + AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); + List> tasks = new ArrayList<>(); + + for (int i=0; i < numTasks; i++) { + tasks.add(() -> { + Instant startRecord = Instant.now(); + abfsPerfTracker.getClientLatency(); + long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); + LOG.debug("Spent {} ms in recording latency.", latencyRecord); + return latencyRecord; + }); + } + + for (Future fr: executorService.invokeAll(tasks)) { + aggregateLatency += fr.get(); + } + + double averageRecordLatency = aggregateLatency/numTasks; + assertThat(averageRecordLatency).describedAs("Average time for getting latency records should be bounded") + .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); + } + + @Test + public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { + Instant testInstant = Instant.now(); + AbfsPerfTracker abfsPerfTrackerDisabled = new AbfsPerfTracker(accountName, filesystemName, false); + AbfsPerfTracker abfsPerfTrackerEnabled = new AbfsPerfTracker(accountName, filesystemName, true); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + verifyNoException(abfsPerfTrackerDisabled); + 
verifyNoException(abfsPerfTrackerEnabled); + } + + private void verifyNoException(AbfsPerfTracker abfsPerfTracker) throws Exception { + Instant testInstant = Instant.now(); + final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); + + try ( + AbfsPerfInfo tracker01 = new AbfsPerfInfo(abfsPerfTracker, null, null); + AbfsPerfInfo tracker02 = new AbfsPerfInfo(abfsPerfTracker, "test", null); + AbfsPerfInfo tracker03 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker04 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + + AbfsPerfInfo tracker05 = new AbfsPerfInfo(abfsPerfTracker, null, null); + AbfsPerfInfo tracker06 = new AbfsPerfInfo(abfsPerfTracker, "test", null); + AbfsPerfInfo tracker07 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker08 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker09 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker10 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + + AbfsPerfInfo tracker11 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker12 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + AbfsPerfInfo tracker13 = new AbfsPerfInfo(abfsPerfTracker, "test", "test"); + ) { + tracker01.registerResult(null).registerSuccess(false); + tracker02.registerResult(null).registerSuccess(false); + tracker03.registerResult(null).registerSuccess(false); + tracker04.registerResult(httpOperation).registerSuccess(false); + + tracker05.registerResult(null).registerSuccess(false).registerAggregates(null, 0); + tracker06.registerResult(null).registerSuccess(false).registerAggregates(null, 0); + tracker07.registerResult(null).registerSuccess(false).registerAggregates(null, 0); + tracker08.registerResult(httpOperation).registerSuccess(false).registerAggregates(null, 0); + tracker09.registerResult(httpOperation).registerSuccess(false).registerAggregates(Instant.now(), 0); + tracker10.registerResult(httpOperation).registerSuccess(false).registerAggregates(Instant.now(), TEST_AGGREGATE_COUNT); + + tracker11.registerResult(httpOperation).registerSuccess(false).registerAggregates(testInstant, TEST_AGGREGATE_COUNT); + tracker12.registerResult(httpOperation).registerSuccess(false).registerAggregates(Instant.MAX, TEST_AGGREGATE_COUNT); + tracker13.registerResult(httpOperation).registerSuccess(false).registerAggregates(Instant.MIN, TEST_AGGREGATE_COUNT); + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java deleted file mode 100644 index 74d4f64df1ecd..0000000000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestLatencyTracker.java +++ /dev/null @@ -1,479 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azurebfs.services; - -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.URL; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.regex.Pattern; - -/** - * Test the latency tracker for abfs - * - */ -public final class TestLatencyTracker { - private static final Logger LOG = LoggerFactory.getLogger(TestLatencyTracker.class); - private static final int TEST_AGGREGATE_LATENCY = 42; - private final String filesystemName = "bogusFilesystemName"; - private final String accountName = "bogusAccountName"; - private final URL url; - - public TestLatencyTracker() throws Exception { - this.url = new URL("http", "www.microsoft.com", "/bogusFile"); - } - - @Test - public void verifyDisablingOfTracker() throws Exception { - // verify that disabling of the tracker works - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); - - String latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNull("LatencyTracker should be empty", latencyDetails); - - latencyTracker.recordClientLatency(Instant.now(), "disablingCaller", "disablingCallee", true, - new AbfsHttpOperation(url, "GET", new ArrayList())); - - latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNull("LatencyTracker should return no record", latencyDetails); - } - - @Test - public void verifyTrackingForSingletonLatencyRecords() throws Exception { - // verify that tracking for singleton latency records works as expected - final int numTasks = 100; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - - String latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNull("LatencyTracker should be empty", latencyDetails); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Integer call() throws Exception { - latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, httpOperation); - return 0; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - fr.get(); - } - - for (int i=0; i < numTasks; i++) { - latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); - Assert.assertTrue("Latency record should be in the correct format", Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" - + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); - } - } - - @Test - public void 
verifyTrackingForAggregateLatencyRecords() throws Exception { - // verify that tracking of aggregate latency records works as expected - final int numTasks = 100; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - - String latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNull("LatencyTracker should be empty", latencyDetails); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Integer call() throws Exception { - // test latency tracking when aggregate latency numbers are also passed - latencyTracker.recordClientLatency(Instant.now(), "oneOperationCaller", "oneOperationCallee", true, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); - return 0; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - fr.get(); - } - - for (int i=0; i < numTasks; i++) { - latencyDetails = latencyTracker.getClientLatency(); - Assert.assertNotNull("LatencyTracker should return non-null record", latencyDetails); - Assert.assertTrue("Latency record should be in the correct format", Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" - + " ls=[0-9]+ lc=" + TEST_AGGREGATE_LATENCY + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)); - } - } - - @Test - public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exception { - // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final long maxLatencyWhenDisabledMs = 1; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startRecord = Instant.now(); - - try{ - } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, httpOperation); - } - - long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in recording latency.", latencyRecord); - return latencyRecord; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", - averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exception { - // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final long maxLatencyWhenDisabledMs = 1; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - 
LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startRecord = Instant.now(); - - try { - // placeholder try block - } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, TEST_AGGREGATE_LATENCY, httpOperation); - } - - long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in recording latency.", latencyRecord); - return latencyRecord; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", - averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { - // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final long maxLatencyWhenDisabledMs = 1; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startGet = Instant.now(); - latencyTracker.getClientLatency(); - long latencyGet = Duration.between(startGet, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in retrieving latency record.", latencyGet); - return latencyGet; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for getting latency records, %s ms should be in the range [%s, %s).", - averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception { - final long maxLatencyWhenDisabledMs = 50; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startRecord = Instant.now(); - - try { - // placeholder try block - } finally { - 
latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, httpOperation); - } - - long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in recording latency.", latencyRecord); - return latencyRecord; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", - averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception { - final long maxLatencyWhenDisabledMs = 50; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startRecord = Instant.now(); - - try { - // placeholder try block - } finally { - latencyTracker.recordClientLatency(startRecord, "oneOperationCaller", "oneOperationCallee", true, startRecord, TEST_AGGREGATE_LATENCY, httpOperation); - } - - long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in recording latency.", latencyRecord); - return latencyRecord; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", - averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { - final long maxLatencyWhenDisabledMs = 50; - final long minLatencyWhenDisabledMs = 0; - final long numTasks = 1000; - long aggregateLatency = 0; - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - - ExecutorService executorService = Executors.newCachedThreadPool(); - List> tasks = new ArrayList>(); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - for (int i=0; i < numTasks; i++) { - Callable c = new Callable() { - @Override - public Long call() throws Exception { - Instant startRecord = Instant.now(); - latencyTracker.getClientLatency(); - long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); - LOG.debug("Spent {} ms in recording latency.", latencyRecord); - return latencyRecord; - } - }; - tasks.add(c); - } - - for (Future fr: executorService.invokeAll(tasks)) { - aggregateLatency += fr.get(); - } - - double averageRecordLatency = aggregateLatency/numTasks; - Assert.assertTrue(String.format("Average time for recording singleton latencies, %s ms should be in the range [%s, %s).", - 
averageRecordLatency, minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs), - averageRecordLatency < maxLatencyWhenDisabledMs && averageRecordLatency >= minLatencyWhenDisabledMs); - } - - @Test - public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { - Instant testInstant = Instant.now(); - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, false); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - try { - latencyTracker.recordClientLatency(null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, httpOperation); - - latencyTracker.recordClientLatency(null, null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, httpOperation); - - latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null); - - latencyTracker.recordClientLatency(null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); - - latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); - - latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null, 0, null); - - - } catch (Exception e) { - Assert.assertTrue("There should be no exception", false); - } - } - - @Test - public void verifyNoExceptionOnInvalidInputWhenEnabled() throws Exception { - 
Instant testInstant = Instant.now(); - LatencyTracker latencyTracker = new LatencyTracker(accountName, filesystemName, true); - final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList()); - - try { - latencyTracker.recordClientLatency(null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, httpOperation); - - latencyTracker.recordClientLatency(null, null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, httpOperation); - - latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null); - - latencyTracker.recordClientLatency(null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); - - latencyTracker.recordClientLatency(null, null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), null, null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.now(), "test", "test", false, Instant.now(), TEST_AGGREGATE_LATENCY, httpOperation); - - latencyTracker.recordClientLatency(testInstant, Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.MAX, Instant.now(), null, null, false, null, 0, null); - latencyTracker.recordClientLatency(Instant.now(), Instant.MIN, null, null, false, null, 0, null); - } catch (Exception e){ - Assert.assertTrue("There should be no exception", false); - } - } -} \ No newline at end of file From d3258ff74eff05b2defb6f01f50e0d0efb7994c9 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Fri, 25 Oct 2019 12:26:02 -0700 Subject: [PATCH 05/13] use generic interfaceto get log string --- .../contracts/services/AbfsPerfLoggable.java | 32 +++++++++++++++++++ .../azurebfs/services/AbfsHttpOperation.java | 5 +-- 
.../fs/azurebfs/services/AbfsPerfInfo.java | 8 +++-- .../fs/azurebfs/services/AbfsPerfTracker.java | 9 +++--- 4 files changed, 45 insertions(+), 9 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java new file mode 100644 index 0000000000000..041115c8a98d1 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.contracts.services; + +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The AbfsPerfLoggable contract. + */ +@InterfaceStability.Evolving +public interface AbfsPerfLoggable { + /** + * Get's the string to log to the Abfs Logging API. + */ + String getLogString(); +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 3b27e218626de..6be59deaedcf3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -42,12 +42,13 @@ import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.contracts.services.AbfsPerfLoggable; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; /** * Represents an HTTP operation. */ -public class AbfsHttpOperation { +public class AbfsHttpOperation implements AbfsPerfLoggable { private static final Logger LOG = LoggerFactory.getLogger(AbfsHttpOperation.class); private static final int CONNECT_TIMEOUT = 30 * 1000; @@ -164,7 +165,7 @@ public String toString() { } // Returns a trace message for the ABFS API logging service to consume - public String toKvpString() { + public String getLogString() { String urlStr = null; try{ diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java index 9760c57c691f6..07e501167188e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java @@ -20,6 +20,8 @@ import java.time.Instant; +import org.apache.hadoop.fs.azurebfs.contracts.services.AbfsPerfLoggable; + /** * {@code AbfsPerfInfo} holds information on ADLS Gen 2 API performance observed by {@code AbfsClient}. 
Every * Abfs request keeps adding its information (success/failure, latency etc) to its {@code AbfsPerfInfo}'s object @@ -53,7 +55,7 @@ public final class AbfsPerfInfo implements AutoCloseable { private long aggregateCount; // result of the request - private AbfsHttpOperation res; + private AbfsPerfLoggable res; public AbfsPerfInfo(AbfsPerfTracker abfsPerfTracker, String callerName, String calleeName) { this.callerName = callerName; @@ -68,7 +70,7 @@ public AbfsPerfInfo registerSuccess(boolean success) { return this; } - public AbfsPerfInfo registerResult(AbfsHttpOperation res) { + public AbfsPerfInfo registerResult(AbfsPerfLoggable res) { this.res = res; return this; } @@ -126,7 +128,7 @@ public long getAggregateCount() { return aggregateCount; } - public AbfsHttpOperation getResult() { + public AbfsPerfLoggable getResult() { return res; } } \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 68d48de0dc942..2a85e97815f5f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.contracts.services.AbfsPerfLoggable; /** * {@code AbfsPerfTracker} keeps track of service latencies observed by {@code AbfsClient}. Every request hands over @@ -118,7 +119,7 @@ private void recordClientLatency( String callerName, String calleeName, boolean success, - AbfsHttpOperation res) { + AbfsPerfLoggable res) { Instant trackerStart = Instant.now(); long latency = isValidInstant(operationStart) && isValidInstant(operationStop) @@ -130,7 +131,7 @@ private void recordClientLatency( calleeName, success ? "Succeeded" : "Failed", latency, - res == null ? "" : (" " + res.toKvpString())); + res == null ? "" : (" " + res.getLogString())); this.offerToQueue(trackerStart, latencyDetails); } @@ -143,7 +144,7 @@ private void recordClientLatency( boolean success, Instant aggregateStart, long aggregateCount, - AbfsHttpOperation res){ + AbfsPerfLoggable res){ Instant trackerStart = Instant.now(); long latency = isValidInstant(operationStart) && isValidInstant(operationStop) @@ -159,7 +160,7 @@ private void recordClientLatency( latency, aggregateLatency, aggregateCount, - res == null ? "" : (" " + res.toKvpString())); + res == null ? 
"" : (" " + res.getLogString())); offerToQueue(trackerStart, latencyDetails); } From 48fb799257b6fdf2adbe692ff3c2afd0a340ece1 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Fri, 25 Oct 2019 13:53:56 -0700 Subject: [PATCH 06/13] add detailed documentation and comments --- .../fs/azurebfs/services/AbfsPerfInfo.java | 2 +- .../fs/azurebfs/services/AbfsPerfTracker.java | 32 +++++++++++++++ .../hadoop-azure/src/site/markdown/abfs.md | 41 +++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java index 07e501167188e..2c5ca6191a376 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java @@ -25,7 +25,7 @@ /** * {@code AbfsPerfInfo} holds information on ADLS Gen 2 API performance observed by {@code AbfsClient}. Every * Abfs request keeps adding its information (success/failure, latency etc) to its {@code AbfsPerfInfo}'s object - * as and when it becomes available. When the request is over, the performance information is recorded as + * as and when it becomes available. When the request is over, the performance information is recorded while * the {@code AutoCloseable} {@code AbfsPerfInfo} object is "closed". */ public final class AbfsPerfInfo implements AutoCloseable { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 2a85e97815f5f..9578cc71ae47d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -36,6 +36,38 @@ * {@code AbfsPerfTracker}'s queue. When a request is made, we check {@code AbfsPerfTracker} to see if there are * any latency numbers to be reported. If there are any, the stats are added to an HTTP header * ({@code x-ms-abfs-client-latency}) on the next request. 
+ * + * A typical perf log line appears like: + * + * h=KARMA t=2019-10-25T20:21:14.518Z a=abfstest01.dfs.core.windows.net + * c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath r=Succeeded l=32 ls=32 lc=1 s=200 + * e= ci=95121dae-70a8-4187-b067-614091034558 ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE + * u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Fabfs-testcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue + * + * The fields have the following definitions: + * + * h: host name + * t: time when this request was logged + * a: Azure storage account name + * c: container name + * cr: name of the caller method + * ce: name of the callee method + * r: result (Succeeded/Failed) + * l: latency (time spend in callee) + * ls: latency sum (aggregate time spend in caller; logged when there are multiple callees) + * lc: latency count (number of callees; logged when there are multiple callees) + * s: HTTP Status code + * e: Error code + * ci: client request ID + * ri: server request ID + * ct: connection time in milliseconds + * st: sending time in milliseconds + * rt: receiving time in milliseconds + * bs: bytes sent + * br: bytes received + * m: HTTP method (GET, PUT etc) + * u: Encoded HTTP URL + * */ public final class AbfsPerfTracker { diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index c5bad77031dce..fcc94fd68533b 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -661,6 +661,47 @@ Hflush() being the only documented API that can provide persistent data transfer, Flush() also attempting to persist buffered data will lead to performance issues. +### Perf Options + +#### 1. HTTP Request Tracking Options +If you set `fs.azure.abfs.latency.track` to `true`, the module starts tracking the performance metrics of ABFS HTTP +traffic. To obtain these numbers on your machine or cluster, you will also need to enable +debug logging for the `AbfsPerfTracker` class in your `log4j` config. A typical perf log line appears like: + +``` +h=KARMA t=2019-10-25T20:21:14.518Z a=abfstest01.dfs.core.windows.net +c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath r=Succeeded l=32 ls=32 lc=1 s=200 +e= ci=95121dae-70a8-4187-b067-614091034558 ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE +u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Fabfs-testcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue +``` + +The fields have the following definitions: + +`h`: host name +`t`: time when this request was logged +`a`: Azure storage account name +`c`: container name +`cr`: name of the caller method +`ce`: name of the callee method +`r`: result (Succeeded/Failed) +`l`: latency (time spend in callee) +`ls`: latency sum (aggregate time spend in caller; logged when there are multiple callees) +`lc`: latency count (number of callees; logged when there are multiple callees) +`s`: HTTP Status code +`e`: Error code +`ci`: client request ID +`ri`: server request ID +`ct`: connection time in milliseconds +`st`: sending time in milliseconds +`rt`: receiving time in milliseconds +`bs`: bytes sent +`br`: bytes received +`m`: HTTP method (GET, PUT etc) +`u`: Encoded HTTP URL + +Note that these performance numbers are also sent back to the ADLS Gen 2 API endpoints in the `x-ms-abfs-client-latency` HTTP +headers in subsequent requests. 
Azure uses these settings to track their end-to-end latency. + ## Troubleshooting The problems associated with the connector usually come down to, in order From 70d4020a5143a026c489d4c2ddbb1fbde413c1e8 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Fri, 1 Nov 2019 18:36:41 -0700 Subject: [PATCH 07/13] address PR feedback --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 321 +++++++++--------- .../constants/HttpHeaderConfigurations.java | 1 + .../contracts/services/AbfsPerfLoggable.java | 2 +- .../azurebfs/services/AbfsHttpOperation.java | 30 +- .../fs/azurebfs/services/AbfsInputStream.java | 5 +- .../azurebfs/services/AbfsOutputStream.java | 18 +- .../fs/azurebfs/services/AbfsPerfTracker.java | 100 +++++- .../azurebfs/services/AbfsRestOperation.java | 4 +- .../hadoop-azure/src/site/markdown/abfs.md | 27 +- .../services/TestAbfsPerfTracker.java | 75 ++-- 10 files changed, 358 insertions(+), 225 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 8102926f91792..4cc504b4429e5 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -73,8 +73,18 @@ import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; -import org.apache.hadoop.fs.azurebfs.services.*; +import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsPermission; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; +import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials; import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker; +import org.apache.hadoop.fs.azurebfs.services.AbfsPerfInfo; import org.apache.hadoop.fs.azurebfs.utils.Base64; import org.apache.hadoop.fs.azurebfs.utils.CRC64; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; @@ -209,11 +219,11 @@ public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { if (!isNamespaceEnabledSet) { LOG.debug("Get root ACL status"); - try (AbfsPerfInfo tracker = startTracking("getIsNamespaceEnabled", "getAclStatus")) { + try (AbfsPerfInfo perfInfo = startTracking("getIsNamespaceEnabled", "getAclStatus")) { AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); isNamespaceEnabled = true; - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); } catch (AbfsRestOperationException ex) { // Get ACL status is a HEAD request, its response doesn't contain errorCode // So can only rely on its status code to determine its account type. 
@@ -264,19 +274,19 @@ public AbfsConfiguration getAbfsConfiguration() { } public Hashtable getFilesystemProperties() throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("getFilesystemProperties", "getFilesystemProperties")) { + try (AbfsPerfInfo perfInfo = startTracking("getFilesystemProperties", "getFilesystemProperties")) { LOG.debug("getFilesystemProperties for filesystem: {}", client.getFileSystem()); final Hashtable parsedXmsProperties; final AbfsRestOperation op = client.getFilesystemProperties(); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return parsedXmsProperties; } @@ -284,15 +294,15 @@ public Hashtable getFilesystemProperties() throws AzureBlobFileS public void setFilesystemProperties(final Hashtable properties) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("setFilesystemProperties", "setFilesystemProperties")) { - if (properties == null || properties.isEmpty()) { - return; - } + if (properties == null || properties.isEmpty()) { + return; + } - LOG.debug("setFilesystemProperties for filesystem: {} with properties: {}", - client.getFileSystem(), - properties); + LOG.debug("setFilesystemProperties for filesystem: {} with properties: {}", + client.getFileSystem(), + properties); + try (AbfsPerfInfo perfInfo = startTracking("setFilesystemProperties", "setFilesystemProperties")) { final String commaSeparatedProperties; try { commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); @@ -301,32 +311,32 @@ public void setFilesystemProperties(final Hashtable properties) } final AbfsRestOperation op = client.setFilesystemProperties(commaSeparatedProperties); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public Hashtable getPathStatus(final Path path) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("getPathStatus", "getPathStatus")){ + try (AbfsPerfInfo perfInfo = startTracking("getPathStatus", "getPathStatus")){ LOG.debug("getPathStatus for filesystem: {} path: {}", client.getFileSystem(), path); final Hashtable parsedXmsProperties; final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); parsedXmsProperties = parseCommaSeparatedXmsProperties(xMsProperties); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return parsedXmsProperties; } } public void setPathProperties(final Path path, final Hashtable properties) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("setPathProperties", "setPathProperties")){ + try (AbfsPerfInfo perfInfo = startTracking("setPathProperties", "setPathProperties")){ LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}", client.getFileSystem(), path, @@ -339,33 +349,33 @@ public void setPathProperties(final Path path, final Hashtable p throw new InvalidAbfsRestOperationException(ex); } final AbfsRestOperation op = 
client.setPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), commaSeparatedProperties); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public void createFilesystem() throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("createFilesystem", "createFilesystem")){ + try (AbfsPerfInfo perfInfo = startTracking("createFilesystem", "createFilesystem")){ LOG.debug("createFilesystem for filesystem: {}", client.getFileSystem()); final AbfsRestOperation op = client.createFilesystem(); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public void deleteFilesystem() throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("deleteFilesystem", "deleteFilesystem")) { + try (AbfsPerfInfo perfInfo = startTracking("deleteFilesystem", "deleteFilesystem")) { LOG.debug("deleteFilesystem for filesystem: {}", client.getFileSystem()); final AbfsRestOperation op = client.deleteFilesystem(); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public OutputStream createFile(final Path path, final boolean overwrite, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("createFile", "createPath")) { + try (AbfsPerfInfo perfInfo = startTracking("createFile", "createPath")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -378,7 +388,7 @@ public OutputStream createFile(final Path path, final boolean overwrite, final F final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? getOctalNotation(umask) : null); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); return new AbfsOutputStream( client, @@ -392,7 +402,7 @@ public OutputStream createFile(final Path path, final boolean overwrite, final F public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("createDirectory", "createPath")) { + try (AbfsPerfInfo perfInfo = startTracking("createDirectory", "createPath")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -404,19 +414,19 @@ public void createDirectory(final Path path, final FsPermission permission, fina final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), false, true, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? 
getOctalNotation(umask) : null); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statistics statistics) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("openFileForRead", "getPathStatus")) { + try (AbfsPerfInfo perfInfo = startTracking("openFileForRead", "getPathStatus")) { LOG.debug("openFileForRead filesystem: {} path: {}", client.getFileSystem(), path); final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); final long contentLength = Long.parseLong(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); @@ -430,7 +440,7 @@ public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statist null); } - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); // Add statistics for InputStream return new AbfsInputStream(client, statistics, @@ -442,14 +452,14 @@ public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statist public OutputStream openFileForWrite(final Path path, final boolean overwrite) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("openFileForWrite", "getPathStatus")) { + try (AbfsPerfInfo perfInfo = startTracking("openFileForWrite", "getPathStatus")) { LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", client.getFileSystem(), path, overwrite); final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); final Long contentLength = Long.valueOf(op.getResult().getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH)); @@ -464,7 +474,7 @@ public OutputStream openFileForWrite(final Path path, final boolean overwrite) t final long offset = overwrite ? 
0 : contentLength; - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return new AbfsOutputStream( client, @@ -495,17 +505,17 @@ public void rename(final Path source, final Path destination) throws String continuation = null; do { - try (AbfsPerfInfo tracker = startTracking("rename", "renamePath")) { + try (AbfsPerfInfo perfInfo = startTracking("rename", "renamePath")) { AbfsRestOperation op = client.renamePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(source), AbfsHttpConstants.FORWARD_SLASH + getRelativePath(destination), continuation); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); if (!shouldContinue) { - tracker.registerAggregates(startAggregate, countAggregate); + perfInfo.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); @@ -525,24 +535,24 @@ public void delete(final Path path, final boolean recursive) String continuation = null; do { - try (AbfsPerfInfo tracker = startTracking("delete", "deletePath")) { + try (AbfsPerfInfo perfInfo = startTracking("delete", "deletePath")) { AbfsRestOperation op = client.deletePath( AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); if (!shouldContinue) { - tracker.registerAggregates(startAggregate, countAggregate); + perfInfo.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); } public FileStatus getFileStatus(final Path path) throws IOException { - try (AbfsPerfInfo tracker = startTracking("getFileStatus", "undetermined")) { + try (AbfsPerfInfo perfInfo = startTracking("getFileStatus", "undetermined")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("getFileStatus filesystem: {} path: {} isNamespaceEnabled: {}", client.getFileSystem(), @@ -552,18 +562,18 @@ public FileStatus getFileStatus(final Path path) throws IOException { final AbfsRestOperation op; if (path.isRoot()) { if (isNamespaceEnabled) { - tracker.registerCallee("getAclStatus"); + perfInfo.registerCallee("getAclStatus"); op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); } else { - tracker.registerCallee("getFilesystemProperties"); + perfInfo.registerCallee("getFilesystemProperties"); op = client.getFilesystemProperties(); } } else { - tracker.registerCallee("getPathStatus"); + perfInfo.registerCallee("getPathStatus"); op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); } - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final long blockSize = abfsConfiguration.getAzureBlockSize(); final AbfsHttpOperation result = op.getResult(); @@ -592,7 +602,7 @@ public FileStatus getFileStatus(final Path path) throws IOException { false, primaryUserGroup); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return new VersionedFileStatus( transformedOwner, @@ -652,9 +662,9 @@ public FileStatus[] listStatus(final Path path, final String startFrom) 
throws I ArrayList fileStatuses = new ArrayList<>(); do { - try (AbfsPerfInfo tracker = startTracking("listStatus", "listPath")) { + try (AbfsPerfInfo perfInfo = startTracking("listStatus", "listPath")) { AbfsRestOperation op = client.listPath(relativePath, false, LIST_MAX_RESULTS, continuation); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); ListResultSchema retrievedSchema = op.getResult().getListResultSchema(); if (retrievedSchema == null) { @@ -700,12 +710,12 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I entry.eTag())); } - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); countAggregate++; shouldContinue = continuation != null && !continuation.isEmpty(); if (!shouldContinue) { - tracker.registerAggregates(startAggregate, countAggregate); + perfInfo.registerAggregates(startAggregate, countAggregate); } } } while (shouldContinue); @@ -770,133 +780,134 @@ private String generateContinuationTokenForNonXns(final String path, final Strin public void setOwner(final Path path, final String owner, final String group) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("setOwner", "setOwner")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "setOwner filesystem: {} path: {} owner: {} group: {}", - client.getFileSystem(), - path.toString(), - owner, - group); + LOG.debug( + "setOwner filesystem: {} path: {} owner: {} group: {}", + client.getFileSystem(), + path.toString(), + owner, + group); + try (AbfsPerfInfo perfInfo = startTracking("setOwner", "setOwner")) { final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); final AbfsRestOperation op = client.setOwner(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), transformedOwner, transformedGroup); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public void setPermission(final Path path, final FsPermission permission) throws AzureBlobFileSystemException { - try (AbfsPerfInfo tracker = startTracking("setPermission", "setPermission")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "setPermission filesystem: {} path: {} permission: {}", - client.getFileSystem(), - path.toString(), - permission.toString()); + LOG.debug( + "setPermission filesystem: {} path: {} permission: {}", + client.getFileSystem(), + path.toString(), + permission.toString()); + + try (AbfsPerfInfo perfInfo = startTracking("setPermission", "setPermission")) { final AbfsRestOperation op = client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), 
String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } } public void modifyAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - try (AbfsPerfInfo trackerGet = startTracking("modifyAclEntries", "getAclStatus")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "modifyAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + LOG.debug( + "modifyAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + try (AbfsPerfInfo perfInfoGet = startTracking("modifyAclEntries", "getAclStatus")) { identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map modifyAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), useUpn); - trackerGet.registerResult(op.getResult()); + perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries); - trackerGet.registerSuccess(true).finishTracking(); + perfInfoGet.registerSuccess(true).finishTracking(); - try (AbfsPerfInfo trackerSet = startTracking("modifyAclEntries", "setAcl")) { + try (AbfsPerfInfo perfInfoSet = startTracking("modifyAclEntries", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } public void removeAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - try (AbfsPerfInfo trackerGet = startTracking("removeAclEntries", "getAclStatus")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "removeAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + LOG.debug( + "removeAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + try (AbfsPerfInfo perfInfoGet = startTracking("removeAclEntries", "getAclStatus")) { 
identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map removeAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - trackerGet.registerResult(op.getResult()); + perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.removeAclEntriesInternal(aclEntries, removeAclEntries); - trackerGet.registerSuccess(true).finishTracking(); + perfInfoGet.registerSuccess(true).finishTracking(); - try (AbfsPerfInfo trackerSet = startTracking("removeAclEntries", "setAcl")) { + try (AbfsPerfInfo perfInfoSet = startTracking("removeAclEntries", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemException { - try (AbfsPerfInfo trackerGet = startTracking("removeDefaultAcl", "getAclStatus")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "removeDefaultAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); + LOG.debug( + "removeDefaultAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + try (AbfsPerfInfo perfInfoGet = startTracking("removeDefaultAcl", "getAclStatus")) { final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - trackerGet.registerResult(op.getResult()); + perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); final Map defaultAclEntries = new HashMap<>(); @@ -909,29 +920,30 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio aclEntries.keySet().removeAll(defaultAclEntries.keySet()); - trackerGet.registerSuccess(true).finishTracking(); + perfInfoGet.registerSuccess(true).finishTracking(); - try (AbfsPerfInfo trackerSet = startTracking("removeDefaultAcl", "setAcl")) { + try (AbfsPerfInfo perfInfoSet = startTracking("removeDefaultAcl", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + 
perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } public void removeAcl(final Path path) throws AzureBlobFileSystemException { - try (AbfsPerfInfo trackerGet = startTracking("removeAcl", "getAclStatus")){ - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "removeAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); + LOG.debug( + "removeAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + + try (AbfsPerfInfo perfInfoGet = startTracking("removeAcl", "getAclStatus")){ final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - trackerGet.registerResult(op.getResult()); + perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); @@ -941,64 +953,65 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { newAclEntries.put(AbfsHttpConstants.ACCESS_GROUP, aclEntries.get(AbfsHttpConstants.ACCESS_GROUP)); newAclEntries.put(AbfsHttpConstants.ACCESS_OTHER, aclEntries.get(AbfsHttpConstants.ACCESS_OTHER)); - trackerGet.registerSuccess(true).finishTracking(); + perfInfoGet.registerSuccess(true).finishTracking(); - try (AbfsPerfInfo trackerSet = startTracking("removeAcl", "setAcl")) { + try (AbfsPerfInfo perfInfoSet = startTracking("removeAcl", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); - trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } public void setAcl(final Path path, final List aclSpec) throws AzureBlobFileSystemException { - try (AbfsPerfInfo trackerGet = startTracking("setAcl", "getAclStatus")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "setAcl filesystem: {} path: {} aclspec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); + LOG.debug( + "setAcl filesystem: {} path: {} aclspec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + try (AbfsPerfInfo perfInfoGet = startTracking("setAcl", "getAclStatus")) { identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); final AbfsRestOperation op = 
client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); - trackerGet.registerResult(op.getResult()); + perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map getAclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries); - trackerGet.registerSuccess(true).finishTracking(); + perfInfoGet.registerSuccess(true).finishTracking(); - try (AbfsPerfInfo trackerSet = startTracking("setAcl", "setAcl")) { + try (AbfsPerfInfo perfInfoSet = startTracking("setAcl", "setAcl")) { final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - trackerSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(trackerGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } public AclStatus getAclStatus(final Path path) throws IOException { - try (AbfsPerfInfo tracker = startTracking("getAclStatus", "getAclStatus")) { - if (!getIsNamespaceEnabled()) { - throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); - } + if (!getIsNamespaceEnabled()) { + throw new UnsupportedOperationException( + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + } - LOG.debug( - "getAclStatus filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); + LOG.debug( + "getAclStatus filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + + try (AbfsPerfInfo perfInfo = startTracking("getAclStatus", "getAclStatus")) { AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - tracker.registerResult(op.getResult()); + perfInfo.registerResult(op.getResult()); final String transformedOwner = identityTransformer.transformIdentityForGetRequest( op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), @@ -1024,7 +1037,7 @@ public AclStatus getAclStatus(final Path path) throws IOException { aclStatusBuilder.setPermission(fsPermission); aclStatusBuilder.stickyBit(fsPermission.getStickyBit()); aclStatusBuilder.addEntries(aclEntries); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return aclStatusBuilder.build(); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java index c8d43904debcd..79bba094f0e44 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java @@ -58,6 +58,7 @@ public final class HttpHeaderConfigurations { public static final String X_MS_PERMISSIONS = "x-ms-permissions"; public static final String X_MS_UMASK = "x-ms-umask"; public static final String X_MS_NAMESPACE_ENABLED = "x-ms-namespace-enabled"; + public static final String X_MS_ABFS_CLIENT_LATENCY = "x-ms-abfs-client-latency"; private HttpHeaderConfigurations() {} } diff 
--git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java index 041115c8a98d1..5717ee76043a9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java @@ -26,7 +26,7 @@ @InterfaceStability.Evolving public interface AbfsPerfLoggable { /** - * Get's the string to log to the Abfs Logging API. + * Gets the string to log to the Abfs Logging API. */ String getLogString(); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 6be59deaedcf3..881d41f65f27d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -168,35 +168,41 @@ public String toString() { public String getLogString() { String urlStr = null; - try{ + try { urlStr = URLEncoder.encode(url.toString(), "UTF-8"); } catch(UnsupportedEncodingException e) { urlStr = "https%3A%2F%2Ffailed%2Fto%2Fencode%2Furl"; } - return new StringBuilder() - .append("s=") + final StringBuilder sb = new StringBuilder(); + sb.append("s=") .append(statusCode) .append(" e=") .append(storageErrorCode) .append(" ci=") .append(clientRequestId) .append(" ri=") - .append(requestId) - .append(" ct=") - .append(connectionTimeMs) - .append(" st=") - .append(sendRequestTimeMs) - .append(" rt=") - .append(recvResponseTimeMs) - .append(" bs=") + .append(requestId); + + if (isTraceEnabled) { + sb.append(" ct=") + .append(connectionTimeMs) + .append(" st=") + .append(sendRequestTimeMs) + .append(" rt=") + .append(recvResponseTimeMs); + } + + sb.append(" bs=") .append(bytesSent) .append(" br=") .append(bytesReceived) .append(" m=") .append(method) .append(" u=") - .append(urlStr).toString(); + .append(urlStr); + + return sb.toString(); } /** diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index ecccf814cd33c..1f343424fbff8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -226,9 +226,10 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); } final AbfsRestOperation op; - try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "readRemote", "read")) { + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "readRemote", "read")) { op = client.read(path, position, b, offset, length, tolerateOobAppends ? 
"*" : eTag); - tracker.registerResult(op.getResult()).registerSuccess(true); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { AbfsRestOperationException ere = (AbfsRestOperationException) ex; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index b9bd043b586b2..2d409416e8647 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -289,11 +289,14 @@ private synchronized void writeCurrentBufferToService() throws IOException { final Future job = completionService.submit(new Callable() { @Override public Void call() throws Exception { - try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "writeCurrentBufferToService", "append")) { - tracker.registerResult(client.append(path, offset, bytes, 0, - bytesLength).getResult()); + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "writeCurrentBufferToService", "append")) { + AbfsRestOperation op = client.append(path, offset, bytes, 0, + bytesLength); + perfInfo.registerResult(op.getResult()); byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); - tracker.registerSuccess(true); + perfInfo.registerSuccess(true); return null; } } @@ -337,8 +340,11 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { - try (AbfsPerfInfo tracker = new AbfsPerfInfo(client.getAbfsPerfTracker(), "flushWrittenBytesToServiceInternal", "flush")) { - tracker.registerResult(client.flush(path, offset, retainUncommitedData, isClose).getResult()).registerSuccess(true); + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "flushWrittenBytesToServiceInternal", "flush")) { + AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose); + perfInfo.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { if (((AbfsRestOperationException) ex).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 9578cc71ae47d..166a310fdd716 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -24,6 +24,7 @@ import java.time.Instant; import java.util.concurrent.ConcurrentLinkedQueue; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,9 +54,11 @@ * cr: name of the caller method * ce: name of the callee method * r: result (Succeeded/Failed) - * l: latency (time spend in callee) - * ls: latency sum (aggregate time spend in caller; logged when there are multiple callees) - * 
lc: latency count (number of callees; logged when there are multiple callees) + * l: latency (time spent in callee) + * ls: latency sum (aggregate time spent in caller; logged when there are multiple callees; + * logged with the last callee) + * lc: latency count (number of callees; logged when there are multiple callees; + * logged with the last callee) * s: HTTP Status code * e: Error code * ci: client request ID @@ -74,6 +77,30 @@ public final class AbfsPerfTracker { // the logger private static final Logger LOG = LoggerFactory.getLogger(AbfsPerfTracker.class); + // the field names of perf log lines + private static final String HostNameKey = "h"; + private static final String TimestampKey = "t"; + private static final String StorageAccountNameKey = "a"; + private static final String ContainerNameKey = "c"; + private static final String CallerMethodNameKey = "cr"; + private static final String CalleeMethodNameKey = "ce"; + private static final String ResultKey = "r"; + private static final String LatencyKey = "l"; + private static final String LatencySumKey = "ls"; + private static final String LatencyCountKey = "lc"; + private static final String HttpStatusCodeKey = "s"; + private static final String ErrorCodeKey = "e"; + private static final String ClientRequestIdKey = "ci"; + private static final String ServerRequestIdKey = "ri"; + private static final String ConnectionTimeKey = "ct"; + private static final String SendingTimeKey = "st"; + private static final String ReceivingTimeKey = "rt"; + private static final String BytesSentKey = "bs"; + private static final String BytesReceivedKey = "br"; + private static final String HttpMethodKey = "m"; + private static final String HttpUrlKey = "u"; + private static final String StringPlaceholder = "%s"; + // the queue to hold latency information private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue(); @@ -105,8 +132,71 @@ protected AbfsPerfTracker(String filesystemName, String accountName, boolean ena hostName = "UnknownHost"; } - singletonLatencyReportingFormat = "h=" + hostName + " t=%s a=" + accountName + " c=" + filesystemName + " cr=%s ce=%s r=%s l=%s%s"; - aggregateLatencyReportingFormat = "h=" + hostName + " t=%s a=" + accountName + " c=" + filesystemName + " cr=%s ce=%s r=%s l=%s ls=%s lc=%s%s"; + String commonReportingFormat = new StringBuilder() + .append(HostNameKey) + .append(AbfsHttpConstants.EQUAL) + .append(hostName) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(TimestampKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(StorageAccountNameKey) + .append(AbfsHttpConstants.EQUAL) + .append(accountName) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(ContainerNameKey) + .append(AbfsHttpConstants.EQUAL) + .append(filesystemName) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(CallerMethodNameKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(CalleeMethodNameKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(ResultKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(LatencyKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .toString(); + + /** + * Example singleton log (no ls or lc field) + * h=KARMA t=2019-10-25T20:21:14.518Z a=abfstest01.dfs.core.windows.net + * 
c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath r=Succeeded l=32 s=200 + * e= ci=95121dae-70a8-4187-b067-614091034558 ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE + * u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Fabfs-testcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue + */ + singletonLatencyReportingFormat = new StringBuilder() + .append(commonReportingFormat) + .append(StringPlaceholder) + .toString(); + + /** + * Example aggregate log + * h=KARMA t=2019-10-25T20:21:14.518Z a=abfstest01.dfs.core.windows.net + * c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath r=Succeeded l=32 ls=32 lc=1 s=200 + * e= ci=95121dae-70a8-4187-b067-614091034558 ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE + * u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Fabfs-testcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue + */ + aggregateLatencyReportingFormat = new StringBuilder() + .append(commonReportingFormat) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(LatencySumKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) + .append(LatencyCountKey) + .append(AbfsHttpConstants.EQUAL) + .append(StringPlaceholder) + .append(StringPlaceholder) + .toString(); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 44b8ee5ecd7ef..d4b96a97affea 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -125,7 +125,9 @@ void execute() throws AzureBlobFileSystemException { String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); if (latencyHeader != null && !latencyHeader.isEmpty()) { - requestHeaders.add(new AbfsHttpHeader("x-ms-abfs-client-latency", latencyHeader)); + AbfsHttpHeader httpHeader = + new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_ABFS_CLIENT_LATENCY, latencyHeader); + requestHeaders.add(httpHeader); } int retryCount = 0; diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index fcc94fd68533b..47739a75438b3 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -664,15 +664,17 @@ performance issues. ### Perf Options #### 1. HTTP Request Tracking Options -If you set `fs.azure.abfs.latency.track` to `true`, the module starts tracking the performance metrics of ABFS HTTP -traffic. To obtain these numbers on your machine or cluster, you will also need to enable -debug logging for the `AbfsPerfTracker` class in your `log4j` config. A typical perf log line appears like: +If you set `fs.azure.abfs.latency.track` to `true`, the module starts tracking the +performance metrics of ABFS HTTP traffic. To obtain these numbers on your machine +or cluster, you will also need to enable debug logging for the `AbfsPerfTracker` +class in your `log4j` config. 
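For instance, a client could flip the switch programmatically before creating the file system; this is only a sketch, the account, container and path below are placeholders, and the DEBUG level for `AbfsPerfTracker` still has to come from the log4j configuration as described above:

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EnableAbfsLatencyTracking {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same effect as setting this key in core-site.xml.
    conf.setBoolean("fs.azure.abfs.latency.track", true);

    // Placeholder container/account; any ABFS URI with valid credentials works.
    Path dir = new Path("abfss://testcontainer@abfstest01.dfs.core.windows.net/perf-demo");
    FileSystem fs = dir.getFileSystem(conf);

    // Every store call is now timed; the records show up in the AbfsPerfTracker
    // DEBUG log and ride along as the x-ms-abfs-client-latency header on
    // subsequent requests.
    fs.mkdirs(dir);
    fs.getFileStatus(dir);
    fs.delete(dir, true);
  }
}
```
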
A typical perf log line appears like: ``` h=KARMA t=2019-10-25T20:21:14.518Z a=abfstest01.dfs.core.windows.net -c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath r=Succeeded l=32 ls=32 lc=1 s=200 -e= ci=95121dae-70a8-4187-b067-614091034558 ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE -u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Fabfs-testcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue +c=abfs-testcontainer-84828169-6488-4a62-a875-1e674275a29f cr=delete ce=deletePath +r=Succeeded l=32 ls=32 lc=1 s=200 e= ci=95121dae-70a8-4187-b067-614091034558 +ri=97effdcf-201f-0097-2d71-8bae00000000 ct=0 st=0 rt=0 bs=0 br=0 m=DELETE +u=https%3A%2F%2Fabfstest01.dfs.core.windows.net%2Ftestcontainer%2Ftest%3Ftimeout%3D90%26recursive%3Dtrue ``` The fields have the following definitions: @@ -684,9 +686,11 @@ The fields have the following definitions: `cr`: name of the caller method `ce`: name of the callee method `r`: result (Succeeded/Failed) -`l`: latency (time spend in callee) -`ls`: latency sum (aggregate time spend in caller; logged when there are multiple callees) -`lc`: latency count (number of callees; logged when there are multiple callees) +`l`: latency (time spent in callee) +`ls`: latency sum (aggregate time spent in caller; logged when there are multiple +callees; logged with the last callee) +`lc`: latency count (number of callees; logged when there are multiple callees; +logged with the last callee) `s`: HTTP Status code `e`: Error code `ci`: client request ID @@ -699,8 +703,9 @@ The fields have the following definitions: `m`: HTTP method (GET, PUT etc) `u`: Encoded HTTP URL -Note that these performance numbers are also sent back to the ADLS Gen 2 API endpoints in the `x-ms-abfs-client-latency` HTTP -headers in subsequent requests. Azure uses these settings to track their end-to-end latency. +Note that these performance numbers are also sent back to the ADLS Gen 2 API endpoints +in the `x-ms-abfs-client-latency` HTTP headers in subsequent requests. Azure uses these +settings to track their end-to-end latency. 
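
For developers reading the patch, the hand-off from a timed operation to that header can be sketched roughly as follows; it mirrors the unit tests in `TestAbfsPerfTracker` below, and the driver class, caller/callee names and URL are made-up placeholders rather than real call sites:

```
package org.apache.hadoop.fs.azurebfs.services;

import java.net.URL;
import java.util.ArrayList;

// Sketch only; it sits in the services package so that the
// AbfsPerfTracker(String, String, boolean) constructor used by the tests
// is accessible.
public class AbfsPerfTrackerSketch {
  public static void main(String[] args) throws Exception {
    AbfsPerfTracker tracker =
        new AbfsPerfTracker("myfilesystem", "myaccount.dfs.core.windows.net", true);

    // Each request wraps its work in an AutoCloseable AbfsPerfInfo and registers
    // its outcome; closing it pushes a formatted latency record onto the
    // tracker's queue.
    try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "someCaller", "someCallee")) {
      AbfsHttpOperation op = new AbfsHttpOperation(
          new URL("http://www.microsoft.com/bogusFile"), "GET", new ArrayList<>());
      perfInfo.registerResult(op).registerSuccess(true);
    }

    // AbfsRestOperation later asks the tracker for one queued record and, if
    // present, sends it as the x-ms-abfs-client-latency request header.
    String latencyHeader = tracker.getClientLatency();
    System.out.println(latencyHeader);
  }
}
```
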
## Troubleshooting diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java index 72689be34edb8..5bedf48a91921 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java @@ -27,10 +27,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.regex.Pattern; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; @@ -72,8 +70,10 @@ public void verifyDisablingOfTracker() throws Exception { String latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull(); - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "disablingCaller", "disablingCallee")) { - tracker.registerResult(new AbfsHttpOperation(url, "GET", new ArrayList<>())).registerSuccess(true); + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "disablingCaller", + "disablingCallee")) { + AbfsHttpOperation op = new AbfsHttpOperation(url, "GET", new ArrayList<>()); + tracker.registerResult(op).registerSuccess(true); } latencyDetails = abfsPerfTracker.getClientLatency(); @@ -92,9 +92,10 @@ public void verifyTrackingForSingletonLatencyRecords() throws Exception { List> tasks = new ArrayList<>(); AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { tracker.registerResult(httpOperation).registerSuccess(true); return 0; } @@ -105,13 +106,13 @@ public void verifyTrackingForSingletonLatencyRecords() throws Exception { fr.get(); } - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should return non-null record").isNotNull(); - assertThat(Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" - + " s=0 e= ci=[^ ]* ri=[^ ]* ct=[0-9]+ st=[0-9]+ rt=[0-9]+ bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)) - .describedAs("Latency record should be in the correct format").isTrue(); + assertThat(latencyDetails).describedAs("Latency record should be in the correct format") + .containsPattern("h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller" + + " ce=oneOperationCallee r=Succeeded l=[0-9]+ s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET" + + " u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile"); } latencyDetails = abfsPerfTracker.getClientLatency(); @@ -125,15 +126,17 @@ public void verifyTrackingForAggregateLatencyRecords() throws Exception { AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); String latencyDetails = abfsPerfTracker.getClientLatency(); - Assert.assertNull("AbfsPerfTracker should be empty", latencyDetails); + 
assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull(); List> tasks = new ArrayList<>(); AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { - tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(Instant.now(), TEST_AGGREGATE_COUNT); + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true) + .registerAggregates(Instant.now(), TEST_AGGREGATE_COUNT); return 0; } }); @@ -143,13 +146,13 @@ public void verifyTrackingForAggregateLatencyRecords() throws Exception { fr.get(); } - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { latencyDetails = abfsPerfTracker.getClientLatency(); assertThat(latencyDetails).describedAs("AbfsPerfTracker should return non-null record").isNotNull(); - assertThat(Pattern.matches( - "h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller ce=oneOperationCallee r=Succeeded l=[0-9]+" - + " ls=[0-9]+ lc=" + TEST_AGGREGATE_COUNT + " s=0 e= ci=[^ ]* ri=[^ ]* ct=[0-9]+ st=[0-9]+ rt=[0-9]+ bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile", latencyDetails)) - .describedAs("Latency record should be in the correct format").isTrue(); + assertThat(latencyDetails).describedAs("Latency record should be in the correct format") + .containsPattern("h=[^ ]* t=[^ ]* a=bogusFilesystemName c=bogusAccountName cr=oneOperationCaller" + + " ce=oneOperationCallee r=Succeeded l=[0-9]+ ls=[0-9]+ lc=" + TEST_AGGREGATE_COUNT + + " s=0 e= ci=[^ ]* ri=[^ ]* bs=0 br=0 m=GET u=http%3A%2F%2Fwww.microsoft.com%2FbogusFile"); } latencyDetails = abfsPerfTracker.getClientLatency(); @@ -167,11 +170,12 @@ public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exceptio List> tasks = new ArrayList<>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startRecord = Instant.now(); - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { tracker.registerResult(httpOperation).registerSuccess(true); } @@ -201,12 +205,14 @@ public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exceptio List> tasks = new ArrayList<>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startRecord = Instant.now(); - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { - tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(startRecord, TEST_AGGREGATE_COUNT); + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true) + .registerAggregates(startRecord, TEST_AGGREGATE_COUNT); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -234,7 +240,7 @@ public void 
verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false); List> tasks = new ArrayList<>(); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startGet = Instant.now(); abfsPerfTracker.getClientLatency(); @@ -263,11 +269,12 @@ public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception List> tasks = new ArrayList<>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startRecord = Instant.now(); - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { tracker.registerResult(httpOperation).registerSuccess(true); } @@ -296,12 +303,14 @@ public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception List> tasks = new ArrayList<>(); final AbfsHttpOperation httpOperation = new AbfsHttpOperation(url, "GET", new ArrayList<>()); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startRecord = Instant.now(); - try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", "oneOperationCallee")) { - tracker.registerResult(httpOperation).registerSuccess(true).registerAggregates(startRecord, TEST_AGGREGATE_COUNT); + try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "oneOperationCaller", + "oneOperationCallee")) { + tracker.registerResult(httpOperation).registerSuccess(true). + registerAggregates(startRecord, TEST_AGGREGATE_COUNT); } long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis(); @@ -328,7 +337,7 @@ public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true); List> tasks = new ArrayList<>(); - for (int i=0; i < numTasks; i++) { + for (int i = 0; i < numTasks; i++) { tasks.add(() -> { Instant startRecord = Instant.now(); abfsPerfTracker.getClientLatency(); @@ -348,7 +357,7 @@ public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { } @Test - public void verifyNoExceptionOnInvalidInputWhenDisabled() throws Exception { + public void verifyNoExceptionOnInvalidInput() throws Exception { Instant testInstant = Instant.now(); AbfsPerfTracker abfsPerfTrackerDisabled = new AbfsPerfTracker(accountName, filesystemName, false); AbfsPerfTracker abfsPerfTrackerEnabled = new AbfsPerfTracker(accountName, filesystemName, true); From 0c4b559265e820e894444aea1a8160d1fdc6a346 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Sun, 3 Nov 2019 00:27:41 -0700 Subject: [PATCH 08/13] include the logging into perf tracking as well --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 109 ++++++++++-------- 1 file changed, 59 insertions(+), 50 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 4cc504b4429e5..2b1a96664a1ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -785,14 +785,15 @@ public void setOwner(final Path path, final String owner, final String group) th "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "setOwner filesystem: {} path: {} owner: {} group: {}", - client.getFileSystem(), - path.toString(), - owner, - group); - try (AbfsPerfInfo perfInfo = startTracking("setOwner", "setOwner")) { + + LOG.debug( + "setOwner filesystem: {} path: {} owner: {} group: {}", + client.getFileSystem(), + path.toString(), + owner, + group); + final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); @@ -809,13 +810,14 @@ public void setPermission(final Path path, final FsPermission permission) throws "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "setPermission filesystem: {} path: {} permission: {}", - client.getFileSystem(), - path.toString(), - permission.toString()); - try (AbfsPerfInfo perfInfo = startTracking("setPermission", "setPermission")) { + + LOG.debug( + "setPermission filesystem: {} path: {} permission: {}", + client.getFileSystem(), + path.toString(), + permission.toString()); + final AbfsRestOperation op = client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); @@ -830,13 +832,14 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "modifyAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); - try (AbfsPerfInfo perfInfoGet = startTracking("modifyAclEntries", "getAclStatus")) { + + LOG.debug( + "modifyAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map modifyAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); @@ -865,13 +868,14 @@ public void removeAclEntries(final Path path, final List aclSpec) thro "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "removeAclEntries filesystem: {} path: {} aclSpec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); - try (AbfsPerfInfo perfInfoGet = startTracking("removeAclEntries", "getAclStatus")) { + + LOG.debug( + "removeAclEntries filesystem: {} path: {} aclSpec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map removeAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); @@ -900,12 +904,13 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "removeDefaultAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); - try 
(AbfsPerfInfo perfInfoGet = startTracking("removeDefaultAcl", "getAclStatus")) { + + LOG.debug( + "removeDefaultAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -936,12 +941,13 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "removeAcl filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); - try (AbfsPerfInfo perfInfoGet = startTracking("removeAcl", "getAclStatus")){ + + LOG.debug( + "removeAcl filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -969,13 +975,14 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "setAcl filesystem: {} path: {} aclspec: {}", - client.getFileSystem(), - path.toString(), - AclEntry.aclSpecToString(aclSpec)); - try (AbfsPerfInfo perfInfoGet = startTracking("setAcl", "getAclStatus")) { + + LOG.debug( + "setAcl filesystem: {} path: {} aclspec: {}", + client.getFileSystem(), + path.toString(), + AclEntry.aclSpecToString(aclSpec)); + identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); @@ -1004,25 +1011,27 @@ public AclStatus getAclStatus(final Path path) throws IOException { "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } - LOG.debug( - "getAclStatus filesystem: {} path: {}", - client.getFileSystem(), - path.toString()); - try (AbfsPerfInfo perfInfo = startTracking("getAclStatus", "getAclStatus")) { + + LOG.debug( + "getAclStatus filesystem: {} path: {}", + client.getFileSystem(), + path.toString()); + AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); - perfInfo.registerResult(op.getResult()); + AbfsHttpOperation result = op.getResult(); + perfInfo.registerResult(result); final String transformedOwner = identityTransformer.transformIdentityForGetRequest( - op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), + result.getResponseHeader(HttpHeaderConfigurations.X_MS_OWNER), true, userName); final String transformedGroup = identityTransformer.transformIdentityForGetRequest( - op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), + result.getResponseHeader(HttpHeaderConfigurations.X_MS_GROUP), false, primaryUserGroup); - final String permissions = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); + final String permissions = result.getResponseHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS); final String aclSpecString = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL); final List aclEntries = AclEntry.parseAclSpec(AbfsAclHelper.processAclString(aclSpecString), true); From 
0fd18a004fda1fd2b541df3d534c9d0cc537f5af Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Sun, 3 Nov 2019 00:39:21 -0700 Subject: [PATCH 09/13] cut short the lines --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 49 +++++++++++++------ 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 2b1a96664a1ef..d6997f4fb73cd 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -274,7 +274,8 @@ public AbfsConfiguration getAbfsConfiguration() { } public Hashtable getFilesystemProperties() throws AzureBlobFileSystemException { - try (AbfsPerfInfo perfInfo = startTracking("getFilesystemProperties", "getFilesystemProperties")) { + try (AbfsPerfInfo perfInfo = startTracking("getFilesystemProperties", + "getFilesystemProperties")) { LOG.debug("getFilesystemProperties for filesystem: {}", client.getFileSystem()); @@ -302,7 +303,8 @@ public void setFilesystemProperties(final Hashtable properties) client.getFileSystem(), properties); - try (AbfsPerfInfo perfInfo = startTracking("setFilesystemProperties", "setFilesystemProperties")) { + try (AbfsPerfInfo perfInfo = startTracking("setFilesystemProperties", + "setFilesystemProperties")) { final String commaSeparatedProperties; try { commaSeparatedProperties = convertXmsPropertiesToCommaSeparatedString(properties); @@ -797,7 +799,10 @@ public void setOwner(final Path path, final String owner, final String group) th final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); - final AbfsRestOperation op = client.setOwner(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), transformedOwner, transformedGroup); + final AbfsRestOperation op = client.setOwner( + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + transformedOwner, + transformedGroup); perfInfo.registerResult(op.getResult()).registerSuccess(true); } @@ -818,7 +823,8 @@ public void setPermission(final Path path, final FsPermission permission) throws path.toString(), permission.toString()); - final AbfsRestOperation op = client.setPermission(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation op = client.setPermission( + AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); perfInfo.registerResult(op.getResult()).registerSuccess(true); @@ -855,9 +861,12 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("modifyAclEntries", "setAcl")) { - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation setAclOp + = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()) + 
.registerSuccess(true) + .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } @@ -891,9 +900,12 @@ public void removeAclEntries(final Path path, final List aclSpec) thro perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeAclEntries", "setAcl")) { - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation setAclOp = + client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()) + .registerSuccess(true) + .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } @@ -928,9 +940,12 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeDefaultAcl", "setAcl")) { - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation setAclOp = + client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()) + .registerSuccess(true) + .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } @@ -962,9 +977,12 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("removeAcl", "setAcl")) { - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation setAclOp = + client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); - perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()) + .registerSuccess(true) + .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } @@ -998,9 +1016,12 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl perfInfoGet.registerSuccess(true).finishTracking(); try (AbfsPerfInfo perfInfoSet = startTracking("setAcl", "setAcl")) { - final AbfsRestOperation setAclOp = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation setAclOp = + client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), AbfsAclHelper.serializeAclSpec(aclEntries), eTag); - perfInfoSet.registerResult(setAclOp.getResult()).registerSuccess(true).registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); + perfInfoSet.registerResult(setAclOp.getResult()) + .registerSuccess(true) + .registerAggregates(perfInfoGet.getTrackingStart(), GET_SET_AGGREGATE_COUNT); } } } From df2647f4ffa8bc4a8a7120e6417670e2bea54a2d Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Wed, 6 Nov 2019 17:02:00 -0800 Subject: [PATCH 10/13] address comments on 
styles --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 3 + .../fs/azurebfs/AzureBlobFileSystemStore.java | 14 ++-- .../fs/azurebfs/services/AbfsPerfTracker.java | 82 +++++++++---------- .../azurebfs/services/AbfsRestOperation.java | 2 - .../services/TestAbfsPerfTracker.java | 24 +++--- 5 files changed, 63 insertions(+), 62 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index bec774977a85e..c5ab6a40ffdce 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -475,6 +475,9 @@ public boolean isUpnUsed() { return this.useUpn; } + /** + * Whether {@code AbfsClient} should track and send latency info back to storage servers + */ public boolean shouldTrackLatency() { return this.trackLatency; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index d6997f4fb73cd..43e0fbd8615c2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -812,7 +812,7 @@ public void setPermission(final Path path, final FsPermission permission) throws AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfo = startTracking("setPermission", "setPermission")) { @@ -835,7 +835,7 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfoGet = startTracking("modifyAclEntries", "getAclStatus")) { @@ -874,7 +874,7 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro public void removeAclEntries(final Path path, final List aclSpec) throws AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfoGet = startTracking("removeAclEntries", "getAclStatus")) { @@ -913,7 +913,7 @@ public void removeAclEntries(final Path path, final List aclSpec) thro public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfoGet = startTracking("removeDefaultAcl", "getAclStatus")) { @@ -953,7 +953,7 @@ public 
void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio public void removeAcl(final Path path) throws AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfoGet = startTracking("removeAcl", "getAclStatus")){ @@ -990,7 +990,7 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { public void setAcl(final Path path, final List aclSpec) throws AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfoGet = startTracking("setAcl", "getAclStatus")) { @@ -1029,7 +1029,7 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl public AclStatus getAclStatus(final Path path) throws IOException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfo = startTracking("getAclStatus", "getAclStatus")) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 166a310fdd716..2165eb8ddf51b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -78,28 +78,28 @@ public final class AbfsPerfTracker { private static final Logger LOG = LoggerFactory.getLogger(AbfsPerfTracker.class); // the field names of perf log lines - private static final String HostNameKey = "h"; - private static final String TimestampKey = "t"; - private static final String StorageAccountNameKey = "a"; - private static final String ContainerNameKey = "c"; - private static final String CallerMethodNameKey = "cr"; - private static final String CalleeMethodNameKey = "ce"; - private static final String ResultKey = "r"; - private static final String LatencyKey = "l"; - private static final String LatencySumKey = "ls"; - private static final String LatencyCountKey = "lc"; - private static final String HttpStatusCodeKey = "s"; - private static final String ErrorCodeKey = "e"; - private static final String ClientRequestIdKey = "ci"; - private static final String ServerRequestIdKey = "ri"; - private static final String ConnectionTimeKey = "ct"; - private static final String SendingTimeKey = "st"; - private static final String ReceivingTimeKey = "rt"; - private static final String BytesSentKey = "bs"; - private static final String BytesReceivedKey = "br"; - private static final String HttpMethodKey = "m"; - private static final String HttpUrlKey = "u"; - private static final String StringPlaceholder = "%s"; + private static final String HOST_NAME_KEY = "h"; + private static final String TIMESTAMP_KEY = "t"; + private static final String STORAGE_ACCOUNT_NAME_KEY = "a"; + private static final String 
CONTAINER_NAME_KEY = "c"; + private static final String CALLER_METHOD_NAME_KEY = "cr"; + private static final String CALLEE_METHOD_NAME_KEY = "ce"; + private static final String RESULT_KEY = "r"; + private static final String LATENCY_KEY = "l"; + private static final String LATENCY_SUM_KEY = "ls"; + private static final String LATENCY_COUNT_KEY = "lc"; + private static final String HTTP_STATUS_CODE_KEY = "s"; + private static final String ERROR_CODE_KEY = "e"; + private static final String CLIENT_REQUEST_ID_KEY = "ci"; + private static final String SERVER_REQUEST_ID_KEY = "ri"; + private static final String CONNECTION_TIME_KEY = "ct"; + private static final String SENDING_TIME_KEY = "st"; + private static final String RECEIVING_TIME_KEY = "rt"; + private static final String BYTES_SENT_KEY = "bs"; + private static final String BYTES_RECEIVED_KEY = "br"; + private static final String HTTP_METHOD_KEY = "m"; + private static final String HTTP_URL_KEY = "u"; + private static final String STRING_PLACEHOLDER = "%s"; // the queue to hold latency information private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue(); @@ -133,37 +133,37 @@ protected AbfsPerfTracker(String filesystemName, String accountName, boolean ena } String commonReportingFormat = new StringBuilder() - .append(HostNameKey) + .append(HOST_NAME_KEY) .append(AbfsHttpConstants.EQUAL) .append(hostName) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(TimestampKey) + .append(TIMESTAMP_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(StorageAccountNameKey) + .append(STORAGE_ACCOUNT_NAME_KEY) .append(AbfsHttpConstants.EQUAL) .append(accountName) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(ContainerNameKey) + .append(CONTAINER_NAME_KEY) .append(AbfsHttpConstants.EQUAL) .append(filesystemName) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(CallerMethodNameKey) + .append(CALLER_METHOD_NAME_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(CalleeMethodNameKey) + .append(CALLEE_METHOD_NAME_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(ResultKey) + .append(RESULT_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(LatencyKey) + .append(LATENCY_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .toString(); /** @@ -175,7 +175,7 @@ protected AbfsPerfTracker(String filesystemName, String accountName, boolean ena */ singletonLatencyReportingFormat = new StringBuilder() .append(commonReportingFormat) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .toString(); /** @@ -188,14 +188,14 @@ protected AbfsPerfTracker(String filesystemName, String accountName, boolean ena aggregateLatencyReportingFormat = new StringBuilder() .append(commonReportingFormat) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(LatencySumKey) + .append(LATENCY_SUM_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) + .append(STRING_PLACEHOLDER) .append(AbfsHttpConstants.SINGLE_WHITE_SPACE) - .append(LatencyCountKey) + .append(LATENCY_COUNT_KEY) .append(AbfsHttpConstants.EQUAL) - .append(StringPlaceholder) - .append(StringPlaceholder) + 
.append(STRING_PLACEHOLDER) + .append(STRING_PLACEHOLDER) .toString(); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index d4b96a97affea..a1710688f1924 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -123,7 +123,6 @@ public AbfsHttpOperation getResult() { void execute() throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); - if (latencyHeader != null && !latencyHeader.isEmpty()) { AbfsHttpHeader httpHeader = new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_ABFS_CLIENT_LATENCY, latencyHeader); @@ -131,7 +130,6 @@ void execute() throws AzureBlobFileSystemException { } int retryCount = 0; - while (!executeHttpOperation(retryCount++)) { try { Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java index 5bedf48a91921..4f4210287ce75 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java @@ -162,7 +162,7 @@ public void verifyTrackingForAggregateLatencyRecords() throws Exception { @Test public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exception { // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final double maxLatencyWhenDisabledMs = 1; + final double maxLatencyWhenDisabledMs = 1000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -189,7 +189,7 @@ public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exceptio aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for recording singleton latencies should be bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } @@ -197,7 +197,7 @@ public void verifyRecordingSingletonLatencyIsCheapWhenDisabled() throws Exceptio @Test public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exception { // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final double maxLatencyWhenDisabledMs = 1; + final double maxLatencyWhenDisabledMs = 1000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -225,7 +225,7 @@ public void verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exceptio aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for recording aggregate latencies should be bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } @@ -233,7 +233,7 @@ public void 
verifyRecordingAggregateLatencyIsCheapWhenDisabled() throws Exceptio @Test public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { // when latency tracker is disabled, we expect it to take time equivalent to checking a boolean value - final double maxLatencyWhenDisabledMs = 1; + final double maxLatencyWhenDisabledMs = 1000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -254,14 +254,14 @@ public void verifyGettingLatencyRecordsIsCheapWhenDisabled() throws Exception { aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for getting latency records should be bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } @Test public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception { - final double maxLatencyWhenDisabledMs = 50; + final double maxLatencyWhenDisabledMs = 5000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -288,14 +288,14 @@ public void verifyRecordingSingletonLatencyIsCheapWhenEnabled() throws Exception aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for recording singleton latencies should be bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } @Test public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception { - final double maxLatencyWhenDisabledMs = 50; + final double maxLatencyWhenDisabledMs = 5000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -323,14 +323,14 @@ public void verifyRecordingAggregateLatencyIsCheapWhenEnabled() throws Exception aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for recording aggregate latencies is bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } @Test public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { - final double maxLatencyWhenDisabledMs = 50; + final double maxLatencyWhenDisabledMs = 5000; final double minLatencyWhenDisabledMs = 0; final long numTasks = 1000; long aggregateLatency = 0; @@ -351,7 +351,7 @@ public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception { aggregateLatency += fr.get(); } - double averageRecordLatency = aggregateLatency/numTasks; + double averageRecordLatency = aggregateLatency / numTasks; assertThat(averageRecordLatency).describedAs("Average time for getting latency records should be bounded") .isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs); } From 3bcd9367ef9b1094cdb0751dc5d6b44ea1e9a1fd Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Wed, 6 Nov 2019 17:11:26 -0800 Subject: [PATCH 11/13] address more nits --- .../java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java | 1 + .../org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index c5ab6a40ffdce..53fefddec0608 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -690,4 +690,5 @@ private String appendSlashIfNeeded(String authority) { } return authority; } + } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 43e0fbd8615c2..51953d4ae0348 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -784,7 +784,7 @@ public void setOwner(final Path path, final String owner, final String group) th AzureBlobFileSystemException { if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( - "This operation is only valid for storage accounts with the hierarchical namespace enabled."); + "This operation is only valid for storage accounts with the hierarchical namespace enabled."); } try (AbfsPerfInfo perfInfo = startTracking("setOwner", "setOwner")) { From f12d371bc55ae445d1968bd2f87568e36708c83c Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Thu, 7 Nov 2019 22:49:42 -0800 Subject: [PATCH 12/13] fix checkstyle issues --- .../org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java | 3 +-- .../apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java index 2c5ca6191a376..37cc15fc20fb7 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java @@ -81,8 +81,7 @@ public AbfsPerfInfo registerAggregates(Instant aggregateStart, long aggregateCou return this; } - public AbfsPerfInfo finishTracking() - { + public AbfsPerfInfo finishTracking() { if (this.trackingEnd == null) { this.trackingEnd = abfsPerfTracker.getLatencyInstant(); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 2165eb8ddf51b..4dc99ed1646fb 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -200,8 +200,7 @@ protected AbfsPerfTracker(String filesystemName, String accountName, boolean ena } } - public void trackInfo(AbfsPerfInfo perfInfo) - { + public void trackInfo(AbfsPerfInfo perfInfo) { if (!enabled) { return; } From e8727a6c2c748d95db2b1fd2249bc889d74a00c5 Mon Sep 17 00:00:00 2001 From: Jeetesh Mangwani Date: Thu, 14 Nov 2019 11:58:25 -0800 Subject: [PATCH 13/13] fix javadoc warnings --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 4 +++- .../contracts/services/AbfsPerfLoggable.java | 2 ++ .../fs/azurebfs/services/AbfsPerfInfo.java | 18 +++++++++--------- 
.../fs/azurebfs/services/AbfsPerfTracker.java | 14 +++++++------- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 53fefddec0608..9b8c1569002b0 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -476,7 +476,9 @@ public boolean isUpnUsed() { } /** - * Whether {@code AbfsClient} should track and send latency info back to storage servers + * Whether {@code AbfsClient} should track and send latency info back to storage servers. + * + * @return a boolean indicating whether latency should be tracked. */ public boolean shouldTrackLatency() { return this.trackLatency; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java index 5717ee76043a9..772f006182b53 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AbfsPerfLoggable.java @@ -27,6 +27,8 @@ public interface AbfsPerfLoggable { /** * Gets the string to log to the Abfs Logging API. + * + * @return the string that will be logged. */ String getLogString(); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java index 37cc15fc20fb7..0e7a111480cc8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfInfo.java @@ -30,31 +30,31 @@ */ public final class AbfsPerfInfo implements AutoCloseable { - // the tracker which will be extracting perf info out of this object + // the tracker which will be extracting perf info out of this object. private AbfsPerfTracker abfsPerfTracker; - // the caller name + // the caller name. private String callerName; - // the callee name + // the callee name. private String calleeName; - // time when this tracking started + // time when this tracking started. private Instant trackingStart; - // time when this tracking ended + // time when this tracking ended. private Instant trackingEnd; - // whether the tracked request was successful + // whether the tracked request was successful. private boolean success; - // time when the aggregate operation (to which this request belongs) started + // time when the aggregate operation (to which this request belongs) started. private Instant aggregateStart; - // number of requests in the aggregate operation (to which this request belongs) + // number of requests in the aggregate operation (to which this request belongs). private long aggregateCount; - // result of the request + // result of the request. 
private AbfsPerfLoggable res; public AbfsPerfInfo(AbfsPerfTracker abfsPerfTracker, String callerName, String calleeName) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java index 4dc99ed1646fb..e24c47b8c7a6a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsPerfTracker.java @@ -74,10 +74,10 @@ */ public final class AbfsPerfTracker { - // the logger + // the logger. private static final Logger LOG = LoggerFactory.getLogger(AbfsPerfTracker.class); - // the field names of perf log lines + // the field names of perf log lines. private static final String HOST_NAME_KEY = "h"; private static final String TIMESTAMP_KEY = "t"; private static final String STORAGE_ACCOUNT_NAME_KEY = "a"; @@ -101,19 +101,19 @@ public final class AbfsPerfTracker { private static final String HTTP_URL_KEY = "u"; private static final String STRING_PLACEHOLDER = "%s"; - // the queue to hold latency information + // the queue to hold latency information. private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue(); - // whether the latency tracker has been enabled + // whether the latency tracker has been enabled. private boolean enabled = false; - // the host name + // the host name. private String hostName; - // singleton latency reporting format + // singleton latency reporting format. private String singletonLatencyReportingFormat; - // aggregate latency reporting format + // aggregate latency reporting format. private String aggregateLatencyReportingFormat; public AbfsPerfTracker(String filesystemName, String accountName, AbfsConfiguration configuration) {
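To tie the pieces of this series together, the sketch below mirrors how TestAbfsPerfTracker drives AbfsPerfTracker and AbfsPerfInfo: a singleton record for a single REST call, and an aggregate record emitted with the last callee of a multi-call operation. It is an illustrative sketch only: it assumes it is compiled into the org.apache.hadoop.fs.azurebfs.services package (the three-argument boolean constructor used by the tests is not public), and the account, filesystem, caller/callee names and the aggregate count of 2 are placeholders.

```java
package org.apache.hadoop.fs.azurebfs.services;

import java.net.URL;
import java.time.Instant;
import java.util.ArrayList;

public class AbfsPerfTrackerUsageSketch {

  public static void main(String[] args) throws Exception {
    // An enabled tracker for a placeholder filesystem/account pair, as in the tests.
    AbfsPerfTracker tracker = new AbfsPerfTracker("bogusFilesystemName", "bogusAccountName", true);
    URL url = new URL("http://www.microsoft.com/bogusFile");

    // Singleton record: one REST call tracked by one AbfsPerfInfo (produces the "l" field).
    try (AbfsPerfInfo info = new AbfsPerfInfo(tracker, "oneOperationCaller", "oneOperationCallee")) {
      AbfsHttpOperation op = new AbfsHttpOperation(url, "GET", new ArrayList<>());
      info.registerResult(op).registerSuccess(true);
    }

    // Aggregate record: the last callee of a multi-call operation also registers the
    // aggregate start time and callee count, which become the "ls" and "lc" fields.
    Instant aggregateStart = Instant.now();
    try (AbfsPerfInfo info = new AbfsPerfInfo(tracker, "setAcl", "setAcl")) {
      AbfsHttpOperation op = new AbfsHttpOperation(url, "GET", new ArrayList<>());
      info.registerResult(op).registerSuccess(true).registerAggregates(aggregateStart, 2);
    }

    // AbfsRestOperation drains these records into the x-ms-abfs-client-latency header of
    // the next request; each getClientLatency() call dequeues one formatted record here.
    System.out.println(tracker.getClientLatency());
    System.out.println(tracker.getClientLatency());
  }
}
```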