diff --git a/LICENSE-binary b/LICENSE-binary
index 317698b43f36b..e548f768431a9 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -361,7 +361,7 @@ org.objenesis:objenesis:2.6
 org.xerial.snappy:snappy-java:1.1.10.4
 org.yaml:snakeyaml:2.0
 org.wildfly.openssl:wildfly-openssl:2.1.4.Final
-software.amazon.awssdk:bundle:2.25.53
+software.amazon.awssdk:bundle:2.29.52
 
 --------------------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b848d26653d14..b07c91e7868eb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -204,7 +204,7 @@
     1.0-beta-1
     900
     1.12.720
-    2.25.53
+    2.29.52
     3.1.1
     0.0.4
     1.0.1
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
index 1acb2b9945ee1..f52093abbe78a 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
@@ -38,6 +38,7 @@
 import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme;
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity;
+import software.amazon.awssdk.metrics.LoggingMetricPublisher;
 import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.s3.S3AsyncClient;
 import software.amazon.awssdk.services.s3.S3AsyncClientBuilder;
@@ -201,12 +202,20 @@ private , ClientT> Build
     final ClientOverrideConfiguration.Builder override =
         createClientOverrideConfiguration(parameters, conf);
 
-    S3BaseClientBuilder s3BaseClientBuilder = builder
+    S3BaseClientBuilder s3BaseClientBuilder = builder
         .overrideConfiguration(override.build())
         .credentialsProvider(parameters.getCredentialSet())
         .disableS3ExpressSessionAuth(!parameters.isExpressCreateSession())
         .serviceConfiguration(serviceConfiguration);
 
+    if (LOG.isTraceEnabled()) {
+      // if this log is set to "trace" then we turn on logging of SDK metrics.
+      // The metrics themselves are logged at INFO; it is just that reflection work
+      // would be needed to change that setting safely for shaded and unshaded AWS artifacts.
+      s3BaseClientBuilder.overrideConfiguration(o ->
+          o.addMetricPublisher(LoggingMetricPublisher.create()));
+    }
+
     if (conf.getBoolean(HTTP_SIGNER_ENABLED, HTTP_SIGNER_ENABLED_DEFAULT)) {
       // use an http signer through an AuthScheme
       final AuthScheme signer =
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AwsSdkWorkarounds.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AwsSdkWorkarounds.java
index a0673b123b2b1..fabb543bd9047 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AwsSdkWorkarounds.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AwsSdkWorkarounds.java
@@ -18,9 +18,10 @@
 
 package org.apache.hadoop.fs.s3a.impl;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.fs.s3a.impl.logging.LogControl;
-import org.apache.hadoop.fs.s3a.impl.logging.LogControllerFactory;
 
 /**
  * This class exists to support workarounds for parts of the AWS SDK
@@ -35,16 +36,20 @@ public final class AwsSdkWorkarounds {
   public static final String TRANSFER_MANAGER =
       "software.amazon.awssdk.transfer.s3.S3TransferManager";
 
+  private static final Logger LOG = LoggerFactory.getLogger(AwsSdkWorkarounds.class);
+
   private AwsSdkWorkarounds() {
   }
 
   /**
    * Prepare logging before creating AWS clients.
+   * There is currently no logging to require tuning,
+   * so this only logs at trace that it was invoked.
    * @return true if the log tuning operation took place.
    */
   public static boolean prepareLogging() {
-    return LogControllerFactory.createController().
-        setLogLevel(TRANSFER_MANAGER, LogControl.LogLevel.ERROR);
+    LOG.trace("prepareLogging()");
+    return true;
   }
 
   /**
@@ -53,7 +58,6 @@ public static boolean prepareLogging() {
    */
   @VisibleForTesting
   static boolean restoreNoisyLogging() {
-    return LogControllerFactory.createController().
-        setLogLevel(TRANSFER_MANAGER, LogControl.LogLevel.INFO);
+    return true;
   }
 }
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/UploadContentProviders.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/UploadContentProviders.java
index d1fb28257f2ab..9716a09158eea 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/UploadContentProviders.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/UploadContentProviders.java
@@ -286,7 +286,9 @@ public final InputStream newStream() {
         // the stream has been recreated for the first time.
         // notify only once for this stream, so as not to flood
         // the logs.
-        LOG.info("Stream recreated: {}", this);
+        // originally logged at info; logs at debug because HADOOP-19516
+        // means that this message is very common with S3 Express stores.
+        LOG.debug("Stream recreated: {}", this);
       }
       return setCurrentStream(createNewStream());
     }
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/third_party_stores.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/third_party_stores.md
index 1018ec9e7d6c2..feffdf0a8b049 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/third_party_stores.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/third_party_stores.md
@@ -40,6 +40,7 @@ The features which may be unavailable include:
   This is now the default -do not change it.
 * List API to use (`fs.s3a.list.version = 1`)
 * Bucket lifecycle rules to clean up pending uploads.
+* Support for multipart uploads.
 
 ### Disabling Change Detection
@@ -409,7 +410,7 @@ which is a subset of the AWS API.
 To get a compatible access and secret key, follow the instructions of
 [Simple migration from Amazon S3 to Cloud Storage](https://cloud.google.com/storage/docs/aws-simple-migration#defaultproj).
 
-Here are the per-bucket setings for an example bucket "gcs-container"
+Here are the per-bucket settings for an example bucket "gcs-container"
 in Google Cloud Storage. Note the multiobject delete option must be disabled;
 this makes renaming and deleting significantly slower.
 
@@ -452,11 +453,21 @@ this makes renaming and deleting significantly slower.
     <value>true</value>
   </property>
+
   <property>
     <name>fs.s3a.bucket.gcs-container.endpoint.region</name>
-    <value>dummy</value>
+    <value>gcs</value>
   </property>
+
+  <property>
+    <name>fs.s3a.multipart.uploads.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.s3a.optimized.copy.from.local.enabled</name>
+    <value>false</value>
+  </property>
 </configuration>
 ```
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
index 55ebf7614c17f..d9336846342cb 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
@@ -1359,6 +1359,48 @@ execchain.MainClientExec (MainClientExec.java:execute(284)) - Connection can be
 ```
 
+
+To log the output of the AWS SDK metrics, set the logger
+`org.apache.hadoop.fs.s3a.DefaultS3ClientFactory` to `TRACE`.
+This will then turn on logging of the internal SDK metrics.
+
+These are actually logged at INFO by the logger
+```
+software.amazon.awssdk.metrics.LoggingMetricPublisher
+```
+
+```text
+INFO metrics.LoggingMetricPublisher (LoggerAdapter.java:info(165)) - Metrics published:
+MetricCollection(name=ApiCall, metrics=[
+MetricRecord(metric=MarshallingDuration, value=PT0.000092041S),
+MetricRecord(metric=RetryCount, value=0),
+MetricRecord(metric=ApiCallSuccessful, value=true),
+MetricRecord(metric=OperationName, value=DeleteObject),
+MetricRecord(metric=EndpointResolveDuration, value=PT0.000132792S),
+MetricRecord(metric=ApiCallDuration, value=PT0.064890875S),
+MetricRecord(metric=CredentialsFetchDuration, value=PT0.000017458S),
+MetricRecord(metric=ServiceEndpoint, value=https://buckets3.eu-west-2.amazonaws.com),
+MetricRecord(metric=ServiceId, value=S3)], children=[
+MetricCollection(name=ApiCallAttempt, metrics=[
+  MetricRecord(metric=TimeToFirstByte, value=PT0.06260225S),
+  MetricRecord(metric=SigningDuration, value=PT0.000293083S),
+  MetricRecord(metric=ReadThroughput, value=0.0),
+  MetricRecord(metric=ServiceCallDuration, value=PT0.06260225S),
+  MetricRecord(metric=HttpStatusCode, value=204),
+  MetricRecord(metric=BackoffDelayDuration, value=PT0S),
+  MetricRecord(metric=TimeToLastByte, value=PT0.064313667S),
+  MetricRecord(metric=AwsRequestId, value=RKZD44SE5DW91K1G)], children=[
+  MetricCollection(name=HttpClient, metrics=[
+    MetricRecord(metric=AvailableConcurrency, value=1),
+    MetricRecord(metric=LeasedConcurrency, value=0),
+    MetricRecord(metric=ConcurrencyAcquireDuration, value=PT0S),
+    MetricRecord(metric=PendingConcurrencyAcquires, value=0),
+    MetricRecord(metric=MaxConcurrency, value=512),
+    MetricRecord(metric=HttpClientName, value=Apache)], children=[])
+  ])
+  ])
+```
+
 ### Enable S3 Server-side Logging
 
 The [Auditing](auditing) feature of the S3A connector can be used to generate
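For reference, the sketch below is not part of the patch: it shows roughly how a metric publisher of this kind is attached to a client, using the same AWS SDK v2 calls as the `DefaultS3ClientFactory` change above (`ClientOverrideConfiguration.Builder.addMetricPublisher()` and `LoggingMetricPublisher.create()`). The class name and the bare `S3Client.builder()` call are illustrative assumptions; the real factory also configures region, credentials and HTTP options.

```java
// Illustrative sketch only; the real wiring lives in DefaultS3ClientFactory.
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.metrics.LoggingMetricPublisher;
import software.amazon.awssdk.services.s3.S3Client;

public final class SdkMetricLoggingSketch {

  private SdkMetricLoggingSketch() {
  }

  /**
   * Build an S3 client whose per-request metrics are published to the SDK's
   * LoggingMetricPublisher; that publisher logs them at INFO, as shown above.
   */
  public static S3Client createClientLoggingMetrics() {
    ClientOverrideConfiguration override = ClientOverrideConfiguration.builder()
        .addMetricPublisher(LoggingMetricPublisher.create())
        .build();
    return S3Client.builder()
        .overrideConfiguration(override)
        .build();
  }
}
```
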
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index 97af80e70a542..e6dabc91bacce 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -589,6 +589,8 @@ public void testS3SpecificSignerOverride() throws Exception {
     config.set(AWS_REGION, EU_WEST_1);
     disableFilesystemCaching(config);
     fs = S3ATestUtils.createTestFileSystem(config);
+    assumeStoreAwsHosted(fs);
+
     S3Client s3Client = getS3Client("testS3SpecificSignerOverride");
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
index 8671d962175f7..0f79881466f1e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
@@ -39,6 +39,7 @@
 import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM;
 import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.assumeStoreAwsHosted;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipRootTests;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
@@ -101,6 +102,7 @@ public void setup() throws Exception {
     // although not a root dir test, this confuses paths enough it shouldn't be run in
     // parallel with other jobs
     maybeSkipRootTests(getConfiguration());
+    assumeStoreAwsHosted(getFileSystem());
   }
 
   @Override
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java
index 07caeb02f416a..62f2ffbc0df5d 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java
@@ -24,9 +24,9 @@
 import java.nio.file.AccessDeniedException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Ignore;
 import org.junit.Test;
 import software.amazon.awssdk.awscore.AwsExecutionAttribute;
 import software.amazon.awssdk.awscore.exception.AwsServiceException;
@@ -55,6 +55,7 @@
 import static org.apache.hadoop.fs.s3a.Constants.S3_ENCRYPTION_ALGORITHM;
 import static org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.ERROR_ENDPOINT_WITH_FIPS;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.assumeStoreAwsHosted;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
 import static org.apache.hadoop.fs.s3a.test.PublicDatasetTestUtils.DEFAULT_REQUESTER_PAYS_BUCKET_NAME;
 import static org.apache.hadoop.io.IOUtils.closeStream;
@@ -106,6 +107,10 @@ public class ITestS3AEndpointRegion extends AbstractS3ATestBase {
   public static final String EXCEPTION_THROWN_BY_INTERCEPTOR =
       "Exception thrown by interceptor";
 
+  /**
+   * Text to include in assertions.
+   */
+  private static final AtomicReference<String> EXPECTED_MESSAGE = new AtomicReference<>();
   /**
    * New FS instance which will be closed in teardown.
    */
@@ -477,6 +482,7 @@ public void testCentralEndpointAndNullRegionWithCRUD() throws Throwable {
     describe("Access the test bucket using central endpoint and"
         + " null region, perform file system CRUD operations");
     final Configuration conf = getConfiguration();
+    assumeStoreAwsHosted(getFileSystem());
 
     final Configuration newConf = new Configuration(conf);
 
@@ -499,6 +505,7 @@ public void testCentralEndpointAndNullRegionFipsWithCRUD() throws Throwable {
     describe("Access the test bucket using central endpoint and"
         + " null region and fips enabled, perform file system CRUD operations");
+    assumeStoreAwsHosted(getFileSystem());
 
     final String bucketLocation = getFileSystem().getBucketLocation();
     assume("FIPS can be enabled to access buckets from US or Canada endpoints only",
@@ -576,7 +583,7 @@ private void assertOpsUsingNewFs() throws IOException {
         .isFalse();
   }
 
-  private final class RegionInterceptor implements ExecutionInterceptor {
+  private static final class RegionInterceptor implements ExecutionInterceptor {
     private final String endpoint;
     private final String region;
     private final boolean isFips;
@@ -591,28 +598,49 @@ private final class RegionInterceptor implements ExecutionInterceptor {
     public void beforeExecution(Context.BeforeExecution context,
         ExecutionAttributes executionAttributes) {
 
-      if (endpoint != null && !endpoint.endsWith(CENTRAL_ENDPOINT)) {
-        Assertions.assertThat(
-            executionAttributes.getAttribute(AwsExecutionAttribute.ENDPOINT_OVERRIDDEN))
-            .describedAs("Endpoint not overridden").isTrue();
-        Assertions.assertThat(
-            executionAttributes.getAttribute(AwsExecutionAttribute.CLIENT_ENDPOINT).toString())
-            .describedAs("There is an endpoint mismatch").isEqualTo("https://" + endpoint);
+      // extract state from the execution attributes.
+      final Boolean endpointOverridden =
+          executionAttributes.getAttribute(AwsExecutionAttribute.ENDPOINT_OVERRIDDEN);
+      final String clientEndpoint =
+          executionAttributes.getAttribute(AwsExecutionAttribute.CLIENT_ENDPOINT).toString();
+      final Boolean fipsEnabled = executionAttributes.getAttribute(
+          AwsExecutionAttribute.FIPS_ENDPOINT_ENABLED);
+      final String reg = executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION).
+          toString();
+
+      String state = "SDK beforeExecution callback; "
+          + "endpointOverridden=" + endpointOverridden
+          + "; clientEndpoint=" + clientEndpoint
+          + "; fipsEnabled=" + fipsEnabled
+          + "; region=" + reg;
+
+      if (endpoint != null && !endpoint.endsWith(CENTRAL_ENDPOINT)) {
+        Assertions.assertThat(endpointOverridden)
+            .describedAs("Endpoint not overridden in %s. Client Config=%s",
+                state, EXPECTED_MESSAGE.get())
+            .isTrue();
+
+        Assertions.assertThat(clientEndpoint)
+            .describedAs("There is an endpoint mismatch in %s. Client Config=%s",
+                state, EXPECTED_MESSAGE.get())
+            .isEqualTo("https://" + endpoint);
       } else {
-        Assertions.assertThat(
-            executionAttributes.getAttribute(AwsExecutionAttribute.ENDPOINT_OVERRIDDEN))
-            .describedAs("Endpoint is overridden").isEqualTo(null);
+        Assertions.assertThat(endpointOverridden)
+            .describedAs("Attribute endpointOverridden is null in %s. Client Config=%s",
+                state, EXPECTED_MESSAGE.get())
+            .isEqualTo(false);
       }
 
-      Assertions.assertThat(
-          executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION).toString())
-          .describedAs("Incorrect region set").isEqualTo(region);
+      Assertions.assertThat(reg)
+          .describedAs("Incorrect region set in %s. Client Config=%s",
+              state, EXPECTED_MESSAGE.get())
+          .isEqualTo(region);
 
       // verify the fips state matches expectation.
-      Assertions.assertThat(executionAttributes.getAttribute(
-          AwsExecutionAttribute.FIPS_ENDPOINT_ENABLED))
-          .describedAs("Incorrect FIPS flag set in execution attributes")
+      Assertions.assertThat(fipsEnabled)
+          .describedAs("Incorrect FIPS flag set in %s; Client Config=%s",
+              state, EXPECTED_MESSAGE.get())
           .isNotNull()
           .isEqualTo(isFips);
 
@@ -637,6 +665,11 @@ private S3Client createS3Client(Configuration conf,
       String endpoint, String configuredRegion, String expectedRegion, boolean isFips)
       throws IOException {
 
+    String expected =
+        "endpoint=" + endpoint + "; region=" + configuredRegion
+            + "; expectedRegion=" + expectedRegion + "; isFips=" + isFips;
+    LOG.info("Creating S3 client with {}", expected);
+    EXPECTED_MESSAGE.set(expected);
     List<ExecutionInterceptor> interceptors = new ArrayList<>();
     interceptors.add(new RegionInterceptor(endpoint, expectedRegion, isFips));
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 6570b3ee056e1..1be33d1ff8ecc 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -1170,7 +1170,7 @@ public static void assumeNotS3ExpressFileSystem(final FileSystem fs) {
    */
   public static void assumeStoreAwsHosted(final FileSystem fs) {
     assume("store is not AWS S3",
-        !NetworkBinding.isAwsEndpoint(fs.getConf()
+        NetworkBinding.isAwsEndpoint(fs.getConf()
             .getTrimmed(ENDPOINT, DEFAULT_ENDPOINT)));
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java
index ed7a32928b8bf..d18a722a0e2cc 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java
@@ -32,12 +32,9 @@ import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
 
 /**
- * Verify that noisy transfer manager logs are turned off.
+ * Tests for any AWS SDK workaround code.
  * <p>
- * This is done by creating new FS instances and then
- * requesting an on-demand transfer manager from the store.
- * As this is only done once per FS instance, a new FS is
- * required per test case.
+ * These tests are inevitably brittle against SDK updates.
  */
 public class ITestAwsSdkWorkarounds extends AbstractS3ATestBase {
@@ -53,13 +50,6 @@ public class ITestAwsSdkWorkarounds extends AbstractS3ATestBase {
   private static final Logger XFER_LOG =
       LoggerFactory.getLogger(AwsSdkWorkarounds.TRANSFER_MANAGER);
 
-  /**
-   * This is the string which keeps being printed.
-   * {@value}.
-   */
-  private static final String FORBIDDEN =
-      "The provided S3AsyncClient is an instance of MultipartS3AsyncClient";
-
   /**
    * Marginal test run speedup by skipping needless test dir cleanup.
    * @throws IOException failure
@@ -70,23 +60,7 @@ protected void deleteTestDirInTeardown() throws IOException {
   }
 
   /**
-   * Test instantiation with logging disabled.
-   */
-  @Test
-  public void testQuietLogging() throws Throwable {
-    // simulate the base state of logging
-    noisyLogging();
-    // creating a new FS switches to quiet logging
-    try (S3AFileSystem newFs = newFileSystem()) {
-      String output = createAndLogTransferManager(newFs);
-      Assertions.assertThat(output)
-          .describedAs("LOG output")
-          .doesNotContain(FORBIDDEN);
-    }
-  }
-
-  /**
-   * Test instantiation with logging disabled.
+   * Test instantiation with logging enabled.
    */
   @Test
   public void testNoisyLogging() throws Throwable {
@@ -95,9 +69,8 @@ public void testNoisyLogging() throws Throwable {
       noisyLogging();
       String output = createAndLogTransferManager(newFs);
       Assertions.assertThat(output)
-          .describedAs("LOG output does not contain the forbidden text."
-              + " Has the SDK been fixed?")
-          .contains(FORBIDDEN);
+          .describedAs("LOG output")
+          .isEmpty();
     }
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java
index 50ffce7d87a96..b37e6eec7c822 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java
@@ -157,6 +157,7 @@ public void testSimpleBucketWithZoneParam() throws Throwable {
 
   @Test
   public void testS3ExpressBucketWithoutZoneParam() throws Throwable {
+    assumeStoreAwsHosted(getFileSystem());
     expectErrorCode(EXIT_USAGE,
         intercept(ExitUtil.ExitException.class, NO_ZONE_SUPPLIED, () ->
             bucketTool.exec("bucket", d(CREATE),
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/sdk/TestAWSV2SDK.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/sdk/TestAWSV2SDK.java
index fca9fcc300cbd..1ddd3377cf0a8 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/sdk/TestAWSV2SDK.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/sdk/TestAWSV2SDK.java
@@ -57,7 +57,7 @@ public void testShadedClasses() throws IOException {
     assertThat(v2ClassPath)
         .as("AWS V2 SDK should be present on the classpath").isNotNull();
     List<String> listOfV2SdkClasses = getClassNamesFromJarFile(v2ClassPath);
-    String awsSdkPrefix = "software/amazon/awssdk";
+    String awsSdkPrefix = "software/amazon/";
     List<String> unshadedClasses = new ArrayList<>();
     for (String awsSdkClass : listOfV2SdkClasses) {
       if (!awsSdkClass.startsWith(awsSdkPrefix)) {
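As a usage note (hypothetical example, not part of the patch): with the corrected `S3ATestUtils.assumeStoreAwsHosted()` above, AWS-only test cases follow the same guard pattern as the test changes in this patch, skipping rather than failing when run against third-party stores. The class and test names below are invented for illustration.

```java
// Hypothetical test case illustrating the guard pattern used by the patch.
import org.junit.Test;

import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;

import static org.apache.hadoop.fs.s3a.S3ATestUtils.assumeStoreAwsHosted;

public class ITestAwsOnlyBehaviourExample extends AbstractS3ATestBase {

  @Test
  public void testAwsOnlyBehaviour() throws Throwable {
    // skip (rather than fail) when fs.s3a.endpoint does not point at an AWS-hosted store.
    assumeStoreAwsHosted(getFileSystem());
    // ...assertions which only hold against AWS S3 endpoints...
  }
}
```
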
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
index 7b8dd3c11fcdc..f61668643a1b5 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
+++ b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
@@ -102,3 +102,7 @@ log4j.logger.org.apache.hadoop.fs.s3a.S3AStorageStatistics=INFO
 # services it launches itself.
 # log4.logger.org.apache.hadoop.service=DEBUG
 
+# log this at trace to enable printing of the low-level
+# performance metrics in the AWS SDK itself.
+# log4j.logger.org.apache.hadoop.fs.s3a.DefaultS3ClientFactory=TRACE
+
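Finally, an illustrative debugging combination (not part of the patch, and assuming the loggers follow the usual pattern of being named after the classes shown in this diff): enable the new TRACE switch together with DEBUG on `UploadContentProviders`, whose "Stream recreated" message now logs at debug.

```properties
# Illustrative log4j.properties fragment; adjust locally as needed.
# Turn on SDK metric logging via the DefaultS3ClientFactory switch added above.
log4j.logger.org.apache.hadoop.fs.s3a.DefaultS3ClientFactory=TRACE
# Restore visibility of the "Stream recreated" message, which now logs at debug.
log4j.logger.org.apache.hadoop.fs.s3a.impl.UploadContentProviders=DEBUG
```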