11 changes: 11 additions & 0 deletions docs/plugins/repository-s3.asciidoc
Original file line number Diff line number Diff line change
@@ -159,6 +159,17 @@ https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the
path style access pattern. If your deployment requires the path style access
pattern then you should set this setting to `true` when upgrading.

`disable_chunked_encoding`::

Whether chunked encoding should be disabled. If `false`, chunked encoding is
enabled and used where appropriate. If `true`, chunked encoding is disabled
and will not be used, which may mean that snapshot operations consume more
resources and take longer to complete. Set this to `true` only if you are
using a storage service that does not support chunked encoding. See the
https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Builder.html#disableChunkedEncoding--[AWS
Java SDK documentation] for details. Defaults to `false`.
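+
For example, chunked encoding could be disabled for the `default` client by
adding a setting along the following lines to `elasticsearch.yml` (the client
name `default` is just an illustration here):
+
[source,yaml]
----
s3.client.default.disable_chunked_encoding: true # "default" is an example client name
----
+
The value can also be overridden for an individual repository by specifying
`disable_chunked_encoding` in the repository settings when registering it.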

[float]
[[repository-s3-compatible-services]]
===== S3-compatible services
8 changes: 6 additions & 2 deletions plugins/repository-s3/build.gradle
Original file line number Diff line number Diff line change
@@ -101,6 +101,8 @@ String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2")
String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs")
String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs")

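// Randomly (but reproducibly, from the first hex token of the test seed) decide whether
// chunked encoding should be disabled when running against the S3 test fixture.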
boolean s3DisableChunkedEncoding = (new Random(Long.parseUnsignedLong(project.rootProject.testSeed.tokenize(':').get(0), 16))).nextBoolean()

// If all these variables are missing then we are testing against the internal fixture instead, which has the following
// credentials hard-coded in.

@@ -229,7 +231,8 @@ task s3FixtureProperties {
"s3Fixture.temporary_key" : s3TemporaryAccessKey,
"s3Fixture.temporary_session_token": s3TemporarySessionToken,
"s3Fixture.ec2_bucket_name" : s3EC2Bucket,
"s3Fixture.ecs_bucket_name" : s3ECSBucket
"s3Fixture.ecs_bucket_name" : s3ECSBucket,
"s3Fixture.disableChunkedEncoding" : s3DisableChunkedEncoding
]

doLast {
@@ -257,7 +260,8 @@ processTestResources {
'ec2_bucket': s3EC2Bucket,
'ec2_base_path': s3EC2BasePath,
'ecs_bucket': s3ECSBucket,
'ecs_base_path': s3ECSBasePath
'ecs_base_path': s3ECSBasePath,
'disable_chunked_encoding': s3DisableChunkedEncoding,
]
inputs.properties(expansions)
MavenFilteringHack.filter(it, expansions)
Original file line number Diff line number Diff line change
@@ -99,6 +99,10 @@ final class S3ClientSettings {
static final Setting.AffixSetting<Boolean> USE_PATH_STYLE_ACCESS = Setting.affixKeySetting(PREFIX, "path_style_access",
key -> Setting.boolSetting(key, false, Property.NodeScope));

/** Whether chunked encoding should be disabled or not (Default is false). */
static final Setting.AffixSetting<Boolean> DISABLE_CHUNKED_ENCODING = Setting.affixKeySetting(PREFIX, "disable_chunked_encoding",
key -> Setting.boolSetting(key, false, Property.NodeScope));

/** Credentials to authenticate with s3. */
final S3BasicCredentials credentials;

@@ -134,10 +138,13 @@ final class S3ClientSettings {
/** Whether the s3 client should use path style access. */
final boolean pathStyleAccess;

/** Whether chunked encoding should be disabled or not. */
final boolean disableChunkedEncoding;

private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protocol protocol,
String proxyHost, int proxyPort, String proxyUsername, String proxyPassword,
int readTimeoutMillis, int maxRetries, boolean throttleRetries,
boolean pathStyleAccess) {
boolean pathStyleAccess, boolean disableChunkedEncoding) {
this.credentials = credentials;
this.endpoint = endpoint;
this.protocol = protocol;
@@ -149,6 +156,7 @@ private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protoc
this.maxRetries = maxRetries;
this.throttleRetries = throttleRetries;
this.pathStyleAccess = pathStyleAccess;
this.disableChunkedEncoding = disableChunkedEncoding;
}

/**
@@ -172,6 +180,8 @@ S3ClientSettings refine(RepositoryMetaData metadata) {
final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries);
final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries);
final boolean usePathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess);
final boolean newDisableChunkedEncoding = getRepoSettingOrDefault(
DISABLE_CHUNKED_ENCODING, normalizedSettings, disableChunkedEncoding);
final S3BasicCredentials newCredentials;
if (checkDeprecatedCredentials(repoSettings)) {
newCredentials = loadDeprecatedCredentials(repoSettings);
@@ -180,7 +190,8 @@ S3ClientSettings refine(RepositoryMetaData metadata) {
}
if (Objects.equals(endpoint, newEndpoint) && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost)
&& proxyPort == newProxyPort && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries
&& newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials)) {
&& newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials)
&& newDisableChunkedEncoding == disableChunkedEncoding) {
return this;
}
return new S3ClientSettings(
@@ -194,7 +205,8 @@ static S3ClientSettings getClientSettings(final Settings settings, final String
newReadTimeoutMillis,
newMaxRetries,
newThrottleRetries,
usePathStyleAccess
usePathStyleAccess,
newDisableChunkedEncoding
);
}

@@ -282,7 +294,8 @@ static S3ClientSettings getClientSettings(final Settings settings, final String
Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()),
getConfigValue(settings, clientName, MAX_RETRIES_SETTING),
getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING),
getConfigValue(settings, clientName, USE_PATH_STYLE_ACCESS)
getConfigValue(settings, clientName, USE_PATH_STYLE_ACCESS),
getConfigValue(settings, clientName, DISABLE_CHUNKED_ENCODING)
);
}
}
@@ -305,13 +318,14 @@ public boolean equals(final Object o) {
protocol == that.protocol &&
Objects.equals(proxyHost, that.proxyHost) &&
Objects.equals(proxyUsername, that.proxyUsername) &&
Objects.equals(proxyPassword, that.proxyPassword);
Objects.equals(proxyPassword, that.proxyPassword) &&
disableChunkedEncoding == that.disableChunkedEncoding;
}

@Override
public int hashCode() {
return Objects.hash(credentials, endpoint, protocol, proxyHost, proxyPort, proxyUsername, proxyPassword,
readTimeoutMillis, maxRetries, throttleRetries);
readTimeoutMillis, maxRetries, throttleRetries, disableChunkedEncoding);
}

private static <T> T getConfigValue(Settings settings, String clientName,
Original file line number Diff line number Diff line change
@@ -156,6 +156,9 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) {
if (clientSettings.pathStyleAccess) {
builder.enablePathStyleAccess();
}
if (clientSettings.disableChunkedEncoding) {
builder.disableChunkedEncoding();
}
return builder.build();
}

Original file line number Diff line number Diff line change
@@ -27,6 +27,7 @@
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.test.fixture.AbstractHttpFixture;
import com.amazonaws.util.DateUtils;
import com.amazonaws.util.IOUtils;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
@@ -75,6 +76,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
/** Request handlers for the requests made by the S3 client **/
private final PathTrie<RequestHandler> handlers;

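/** Whether the client under test is expected to have chunked encoding disabled; if so, only plain (non-chunked) PUT uploads are accepted **/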
private final boolean disableChunkedEncoding;
/**
* Creates an {@link AmazonS3Fixture}
*/
@@ -92,6 +94,8 @@ private AmazonS3Fixture(final String workingDir, Properties properties) {
randomAsciiAlphanumOfLength(random, 10), randomAsciiAlphanumOfLength(random, 10));

this.handlers = defaultHandlers(buckets, ec2Bucket, ecsBucket);

this.disableChunkedEncoding = Boolean.parseBoolean(prop(properties, "s3Fixture.disableChunkedEncoding"));
}

private static String nonAuthPath(Request request) {
@@ -216,13 +220,16 @@ private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> bucke

final String destObjectName = objectName(request.getParameters());

// This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip"
// to detect it but it seems that the AWS SDK does not follow the S3 guidelines here.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
//
String headerDecodedContentLength = request.getHeader("X-amz-decoded-content-length");
if (headerDecodedContentLength != null) {
if (disableChunkedEncoding) {
return newInternalError(request.getId(), "Something is wrong with this PUT request");
}
// This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip"
// to detect it but it seems that the AWS SDK does not follow the S3 guidelines here.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
//
int contentLength = Integer.valueOf(headerDecodedContentLength);

// Chunked requests have a payload like this:
@@ -246,9 +253,18 @@ private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> bucke
destBucket.objects.put(destObjectName, bytes);
return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
}
}
} else {
if (disableChunkedEncoding == false) {
return newInternalError(request.getId(), "Something is wrong with this PUT request");
}
// Read from body directly
try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(request.getBody()))) {
byte[] bytes = IOUtils.toByteArray(inputStream);

return newInternalError(request.getId(), "Something is wrong with this PUT request");
destBucket.objects.put(destObjectName, bytes);
return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
}
}
})
);

Original file line number Diff line number Diff line change
Expand Up @@ -151,4 +151,11 @@ public void testPathStyleAccessCanBeSet() {
assertThat(settings.get("default").pathStyleAccess, is(false));
assertThat(settings.get("other").pathStyleAccess, is(true));
}

public void testUseChunkedEncodingCanBeSet() {
final Map<String, S3ClientSettings> settings = S3ClientSettings.load(
Settings.builder().put("s3.client.other.disable_chunked_encoding", true).build());
assertThat(settings.get("default").disableChunkedEncoding, is(false));
assertThat(settings.get("other").disableChunkedEncoding, is(true));
}
}
Original file line number Diff line number Diff line change
@@ -15,6 +15,7 @@ setup:
base_path: "${permanent_base_path}"
canned_acl: private
storage_class: standard
disable_chunked_encoding: ${disable_chunked_encoding}

# Remove the snapshots, if a previous test failed to delete them. This is
# useful for third party tests that runs the test against a real external service.
Original file line number Diff line number Diff line change
@@ -15,6 +15,7 @@ setup:
base_path: "${temporary_base_path}"
canned_acl: private
storage_class: standard
disable_chunked_encoding: ${disable_chunked_encoding}

---
"Snapshot and Restore with repository-s3 using temporary credentials":
Original file line number Diff line number Diff line change
@@ -15,6 +15,7 @@ setup:
base_path: "${ec2_base_path}"
canned_acl: private
storage_class: standard
disable_chunked_encoding: ${disable_chunked_encoding}

---
"Snapshot and Restore with repository-s3 using ec2 credentials":
Original file line number Diff line number Diff line change
@@ -15,6 +15,7 @@ setup:
base_path: "${ecs_base_path}"
canned_acl: private
storage_class: standard
disable_chunked_encoding: ${disable_chunked_encoding}

---
"Snapshot and Restore with repository-s3 using ecs credentials":