From 630757ff28ad8fc819296f3ab60642671ceec757 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Mon, 29 Jan 2018 18:05:51 +0200 Subject: [PATCH 01/21] Simplest action to push password to all the nodes --- .../elasticsearch/action/ActionModule.java | 6 ++ .../cluster/reinit/NodesReInitAction.java | 43 ++++++++ .../cluster/reinit/NodesReInitRequest.java | 78 +++++++++++++++ .../reinit/NodesReInitRequestBuilder.java | 91 +++++++++++++++++ .../cluster/reinit/NodesReInitResponse.java | 99 +++++++++++++++++++ .../reinit/TransportNodesReInitAction.java | 97 ++++++++++++++++++ .../client/ClusterAdminClient.java | 7 +- .../client/support/AbstractClient.java | 7 ++ .../admin/cluster/RestReInitAction.java | 82 +++++++++++++++ 9 files changed, 509 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java create mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 872c217f98091..604c7a200a644 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -39,6 +39,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import 
org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; +import org.elasticsearch.action.admin.cluster.reinit.TransportNodesReInitAction; import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -237,6 +239,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.RestReInitAction; import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; @@ -491,6 +494,7 @@ public void reg actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); + actions.register(NodesReInitAction.INSTANCE, TransportNodesReInitAction.class); //Indexed scripts actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); @@ -607,6 +611,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRecoveryAction(settings, restController)); + registerHandler.accept(new RestReInitAction(settings, restController)); + // Scripts API registerHandler.accept(new RestGetStoredScriptAction(settings, restController)); registerHandler.accept(new RestPutStoredScriptAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java new file mode 100644 index 0000000000000..0bfe3d08604af --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.reinit; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class NodesReInitAction extends Action { + + public static final NodesReInitAction INSTANCE = new NodesReInitAction(); + public static final String NAME = "cluster:admin/reinit"; + + private NodesReInitAction() { + super(NAME); + } + + @Override + public NodesReInitResponse newResponse() { + return new NodesReInitResponse(); + } + + @Override + public NodesReInitRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new NodesReInitRequestBuilder(client, this); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java new file mode 100644 index 0000000000000..66d6a01575ada --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.reinit; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request for an update cluster settings action + */ +public class NodesReInitRequest extends BaseNodesRequest { + + private String secureStorePassword; + + public NodesReInitRequest() { + } + + /** + * Get usage from nodes based on the nodes ids specified. If none are + * passed, usage for all nodes will be returned. + */ + public NodesReInitRequest(String... nodesIds) { + super(nodesIds); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (secureStorePassword == null) { + validationException = addValidationError("secure store password cannot be null (use empty string).", validationException); + } + return validationException; + } + + public String secureStorePassword() { + return secureStorePassword; + } + + public NodesReInitRequest secureStorePassword(String secureStorePassword) { + this.secureStorePassword = secureStorePassword; + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + secureStorePassword = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(secureStorePassword); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java new file mode 100644 index 0000000000000..1e93cf3a4f890 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.reinit; + +import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + + +/** + * Builder for a cluster update settings request + */ +public class NodesReInitRequestBuilder + extends NodesOperationRequestBuilder { + + public NodesReInitRequestBuilder(ElasticsearchClient client, NodesReInitAction action) { + super(client, action, new NodesReInitRequest()); + } + + /** + * Sets the transient settings to be updated. They will not survive a full cluster restart + */ + public NodesReInitRequestBuilder setSecureStorePassword(String secureStorePassword) { + request.secureStorePassword(secureStorePassword); + return this; + } +// +// /** +// * Sets the source containing the transient settings to be updated. 
They will not survive a full cluster restart +// */ +// public ClusterReInitRequestBuilder setTransientSettings(String settings, XContentType xContentType) { +// request.transientSettings(settings, xContentType); +// return this; +// } +// +// /** +// * Sets the transient settings to be updated. They will not survive a full cluster restart +// */ +// public ClusterReInitRequestBuilder setTransientSettings(Map settings) { +// request.transientSettings(settings); +// return this; +// } +// +// /** +// * Sets the persistent settings to be updated. They will get applied cross restarts +// */ +// public ClusterReInitRequestBuilder setPersistentSettings(Settings settings) { +// request.persistentSettings(settings); +// return this; +// } +// +// /** +// * Sets the persistent settings to be updated. They will get applied cross restarts +// */ +// public ClusterReInitRequestBuilder setPersistentSettings(Settings.Builder settings) { +// request.persistentSettings(settings); +// return this; +// } +// +// /** +// * Sets the source containing the persistent settings to be updated. They will get applied cross restarts +// */ +// public ClusterReInitRequestBuilder setPersistentSettings(String settings, XContentType xContentType) { +// request.persistentSettings(settings, xContentType); +// return this; +// } +// +// /** +// * Sets the persistent settings to be updated. 
They will get applied cross restarts +// */ +// public ClusterReInitRequestBuilder setPersistentSettings(Map settings) { +// request.persistentSettings(settings); +// return this; +// } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java new file mode 100644 index 0000000000000..05120da08fef3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.reinit; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; + +/** + * A response for a cluster update settings action. + */ +public class NodesReInitResponse extends BaseNodesResponse implements ToXContentFragment { + + public NodesReInitResponse() { + } + + public NodesReInitResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeResponse::readNodeResponse); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (final NodesReInitResponse.NodeResponse node : getNodes()) { + builder.startObject(node.getNode().getId()); + builder.field("name", node.getNode().getName()); + builder.endObject(); + } + builder.endObject(); + + return builder; + } + + @Override + public String toString() { + try { + final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return builder.string(); + } catch (final IOException e) { + return "{ \"error\" : \"" + e.getMessage() 
+ "\"}"; + } + } + + public static class NodeResponse extends BaseNodeResponse { + + public NodeResponse() { + } + + public NodeResponse(DiscoveryNode node) { + super(node); + } + + public static NodeResponse readNodeResponse(StreamInput in) throws IOException { + final NodeResponse node = new NodeResponse(); + node.readFrom(in); + return node; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java new file mode 100644 index 0000000000000..3b0a2c2faf1f9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.reinit; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +public class TransportNodesReInitAction extends TransportNodesAction { + + @Inject + public TransportNodesReInitAction(Settings settings, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NodesReInitAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, + NodesReInitRequest::new, NodeRequest::new, ThreadPool.Names.MANAGEMENT, NodesReInitResponse.NodeResponse.class); + } + + @Override + protected NodesReInitResponse newResponse(NodesReInitRequest request, List responses, List failures) { + return new NodesReInitResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected TransportNodesReInitAction.NodeRequest newNodeRequest(String nodeId, NodesReInitRequest request) { + return new NodeRequest(nodeId, request); + } + + @Override + protected NodesReInitResponse.NodeResponse newNodeResponse() { + return new NodesReInitResponse.NodeResponse(); + } + + @Override + protected NodesReInitResponse.NodeResponse 
nodeOperation(TransportNodesReInitAction.NodeRequest nodeStatsRequest) { + final NodesReInitRequest request = nodeStatsRequest.request; + return new NodesReInitResponse.NodeResponse(clusterService.localNode()); + } + + public static class NodeRequest extends BaseNodeRequest { + + NodesReInitRequest request; + + public NodeRequest() { + } + + NodeRequest(String nodeId, NodesReInitRequest request) { + super(nodeId); + this.request = request; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + request = new NodesReInitRequest(); + request.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index ccb3296a5d593..5e8719cf7539f 100644 --- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -48,6 +48,7 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; @@ -113,7 +114,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.ingest.WritePipelineResponse; -import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.tasks.TaskId; @@ -186,6 +186,11 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(); + /** + * Re initialize each cluster node and pass them the secret store password. + */ + NodesReInitRequestBuilder prepareReInit(); + /** * Reroutes allocation of shards. Advance API. */ diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index c0da35a307981..e9526ea0d1fe5 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -61,6 +61,8 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; @@ -783,6 +785,11 @@ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { return new ClusterUpdateSettingsRequestBuilder(this, ClusterUpdateSettingsAction.INSTANCE); } + @Override + public NodesReInitRequestBuilder prepareReInit() { + return new NodesReInitRequestBuilder(this, NodesReInitAction.INSTANCE); + } + @Override public ActionFuture nodesInfo(final NodesInfoRequest request) { return execute(NodesInfoAction.INSTANCE, request); diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java new file mode 100644 index 0000000000000..ca95df919aad3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java @@ -0,0 +1,82 @@ +/* + * ELASTICSEARCH CONFIDENTIAL + * __________________ + * + * [2014] Elasticsearch Incorporated. All Rights Reserved. + * + * NOTICE: All information contained herein is, and remains + * the property of Elasticsearch Incorporated and its suppliers, + * if any. The intellectual and technical concepts contained + * herein are proprietary to Elasticsearch Incorporated + * and its suppliers and may be covered by U.S. and Foreign Patents, + * patents in process, and are protected by trade secret or copyright law. + * Dissemination of this information or reproduction of this material + * is strictly forbidden unless prior written permission is obtained + * from Elasticsearch Incorporated. 
+ */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequest; +import org.elasticsearch.action.admin.cluster.reinit.NodesReInitResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.rest.action.RestBuilderListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public final class RestReInitAction extends BaseRestHandler { + + public RestReInitAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, "/_nodes/reinit", this); + controller.registerHandler(POST, "/_nodes/{nodeId}/reinit", this); + } + + @Override + public String getName() { + return "nodes_reinit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + final NodesReInitRequest nodesReInitRequest = new NodesReInitRequest(nodesIds); + nodesReInitRequest.timeout(request.param("timeout")); + nodesReInitRequest.secureStorePassword(request.param("secureStorePassword", "")); + + return channel -> client.admin().cluster().execute(NodesReInitAction.INSTANCE, nodesReInitRequest, + new RestBuilderListener(channel) { + + @Override + public RestResponse buildResponse(NodesReInitResponse response, 
XContentBuilder builder) throws Exception { + builder.startObject(); + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + builder.endObject(); + + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + + } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } + +} From 4b6e54123cf97978ef8880762b3a6f8b09c5a294 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 31 Jan 2018 09:29:19 +0200 Subject: [PATCH 02/21] Bare reinit plugins in action handler --- .../repositories/s3/S3RepositoryPlugin.java | 11 ++++- .../cluster/reinit/NodesReInitRequest.java | 2 +- .../reinit/NodesReInitRequestBuilder.java | 49 +------------------ .../reinit/TransportNodesReInitAction.java | 39 +++++++++++++-- .../plugins/ReInitializablePlugin.java | 26 ++++++++++ .../admin/cluster/RestReInitAction.java | 26 +++++----- 6 files changed, 86 insertions(+), 67 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 010c4b92c21a0..573e36975dc4e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -33,13 +33,14 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReInitializablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; /** * A plugin to add a repository type that writes to and from the AWS S3. 
*/ -public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { +public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { static { SpecialPermission.check(); @@ -50,7 +51,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { // ClientConfiguration clinit has some classloader problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); - } catch (ClassNotFoundException e) { + } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } return null; @@ -92,4 +93,10 @@ public List> getSettings() { S3ClientSettings.MAX_RETRIES_SETTING, S3ClientSettings.USE_THROTTLE_RETRIES_SETTING); } + + @Override + public boolean reinit(Settings settings) { + // TODO clientSettings + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java index 66d6a01575ada..18b2bc6792017 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java @@ -37,7 +37,7 @@ public class NodesReInitRequest extends BaseNodesRequest { public NodesReInitRequest() { } - + /** * Get usage from nodes based on the nodes ids specified. If none are * passed, usage for all nodes will be returned. 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java index 1e93cf3a4f890..95c5eef90abc8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java @@ -40,52 +40,5 @@ public NodesReInitRequestBuilder setSecureStorePassword(String secureStorePasswo request.secureStorePassword(secureStorePassword); return this; } -// -// /** -// * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart -// */ -// public ClusterReInitRequestBuilder setTransientSettings(String settings, XContentType xContentType) { -// request.transientSettings(settings, xContentType); -// return this; -// } -// -// /** -// * Sets the transient settings to be updated. They will not survive a full cluster restart -// */ -// public ClusterReInitRequestBuilder setTransientSettings(Map settings) { -// request.transientSettings(settings); -// return this; -// } -// -// /** -// * Sets the persistent settings to be updated. They will get applied cross restarts -// */ -// public ClusterReInitRequestBuilder setPersistentSettings(Settings settings) { -// request.persistentSettings(settings); -// return this; -// } -// -// /** -// * Sets the persistent settings to be updated. They will get applied cross restarts -// */ -// public ClusterReInitRequestBuilder setPersistentSettings(Settings.Builder settings) { -// request.persistentSettings(settings); -// return this; -// } -// -// /** -// * Sets the source containing the persistent settings to be updated. 
They will get applied cross restarts -// */ -// public ClusterReInitRequestBuilder setPersistentSettings(String settings, XContentType xContentType) { -// request.persistentSettings(settings, xContentType); -// return this; -// } -// -// /** -// * Sets the persistent settings to be updated. They will get applied cross restarts -// */ -// public ClusterReInitRequestBuilder setPersistentSettings(Map settings) { -// request.persistentSettings(settings); -// return this; -// } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java index 3b0a2c2faf1f9..c26d83a930dd4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java @@ -28,11 +28,16 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ReInitializablePlugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.security.GeneralSecurityException; import java.util.List; public class TransportNodesReInitAction extends TransportNodesAction { + private final Environment environment; + private final PluginsService pluginsService; + @Inject - public TransportNodesReInitAction(Settings settings, ThreadPool threadPool, - ClusterService clusterService, TransportService transportService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + public 
TransportNodesReInitAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Environment environment, + PluginsService pluginService) { super(settings, NodesReInitAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, NodesReInitRequest::new, NodeRequest::new, ThreadPool.Names.MANAGEMENT, NodesReInitResponse.NodeResponse.class); + this.environment = environment; + this.pluginsService = pluginService; } @Override - protected NodesReInitResponse newResponse(NodesReInitRequest request, List responses, List failures) { + protected NodesReInitResponse newResponse(NodesReInitRequest request, List responses, + List failures) { return new NodesReInitResponse(clusterService.getClusterName(), responses, failures); } @@ -66,6 +78,25 @@ protected NodesReInitResponse.NodeResponse newNodeResponse() { @Override protected NodesReInitResponse.NodeResponse nodeOperation(TransportNodesReInitAction.NodeRequest nodeStatsRequest) { final NodesReInitRequest request = nodeStatsRequest.request; + // open keystore + KeyStoreWrapper keystore = null; + try { + keystore = KeyStoreWrapper.load(environment.configFile()); + keystore.decrypt(new char[0] /* use password from request */); + } catch (GeneralSecurityException | IOException e) { + throw new RuntimeException(e); + } finally { + if (keystore != null) { + keystore.close(); + } + } + + final Settings.Builder builder = Settings.builder().put(environment.settings(), false); + builder.setSecureSettings(keystore); + + final boolean success = pluginsService.filterPlugins(ReInitializablePlugin.class).stream().map(p -> p.reinit(builder.build())).allMatch( + e -> e == true); + return new NodesReInitResponse.NodeResponse(clusterService.localNode()); } diff --git a/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java 
b/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java new file mode 100644 index 0000000000000..8295305a97110 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.common.settings.Settings; + +public interface ReInitializablePlugin { + boolean reinit(Settings settings); +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java index ca95df919aad3..c7a5d5d809f47 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java @@ -1,18 +1,20 @@ /* - * ELASTICSEARCH CONFIDENTIAL - * __________________ + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * [2014] Elasticsearch Incorporated. All Rights Reserved. + * http://www.apache.org/licenses/LICENSE-2.0 * - * NOTICE: All information contained herein is, and remains - * the property of Elasticsearch Incorporated and its suppliers, - * if any. The intellectual and technical concepts contained - * herein are proprietary to Elasticsearch Incorporated - * and its suppliers and may be covered by U.S. and Foreign Patents, - * patents in process, and are protected by trade secret or copyright law. - * Dissemination of this information or reproduction of this material - * is strictly forbidden unless prior written permission is obtained - * from Elasticsearch Incorporated. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ package org.elasticsearch.rest.action.admin.cluster; From 98854df4cdf30dfb589950c284c52780ff6fd993 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Thu, 22 Feb 2018 15:30:53 +0200 Subject: [PATCH 03/21] Fix checkstyle --- .../admin/cluster/reinit/TransportNodesReInitAction.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java index c26d83a930dd4..1ae53f0da1e2f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java @@ -94,8 +94,8 @@ protected NodesReInitResponse.NodeResponse nodeOperation(TransportNodesReInitAct final Settings.Builder builder = Settings.builder().put(environment.settings(), false); builder.setSecureSettings(keystore); - final boolean success = pluginsService.filterPlugins(ReInitializablePlugin.class).stream().map(p -> p.reinit(builder.build())).allMatch( - e -> e == true); + final boolean success = pluginsService.filterPlugins(ReInitializablePlugin.class).stream() + .map(p -> p.reinit(builder.build())).allMatch(e -> e == true); return new NodesReInitResponse.NodeResponse(clusterService.localNode()); } From 3db5ae0849641f0c1f4297315208b203416f7eb2 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 13 Mar 2018 18:59:21 +0200 Subject: [PATCH 04/21] Update s3 secure settings (#28517) Cache of clients by name which can be cleared when secure settings get updated. 
--- plugins/repository-s3/build.gradle | 2 +- .../repositories/s3/AmazonS3Reference.java | 63 ++++++ .../repositories/s3/AwsS3Service.java | 28 ++- .../repositories/s3/InternalAwsS3Service.java | 141 ++++++------ .../repositories/s3/S3BlobContainer.java | 175 ++++++++------- .../repositories/s3/S3BlobStore.java | 71 +++--- .../repositories/s3/S3ClientSettings.java | 70 ++++-- .../repositories/s3/S3Repository.java | 42 ++-- .../repositories/s3/S3RepositoryPlugin.java | 39 +++- .../plugin-metadata/plugin-security.policy | 3 + .../s3/AbstractS3SnapshotRestoreTest.java | 26 +-- .../repositories/s3/AmazonS3Wrapper.java | 5 + .../s3/AwsS3ServiceImplTests.java | 136 ++++++----- .../repositories/s3/MockAmazonS3.java | 5 + .../s3/RepositoryCredentialsTests.java | 211 ++++++++++++++++++ .../RepositorySettingsCredentialsTests.java | 41 ---- .../s3/S3BlobStoreContainerTests.java | 49 ++-- .../repositories/s3/S3RepositoryTests.java | 86 ++++--- .../repositories/s3/TestAmazonS3.java | 38 ++-- .../repositories/s3/TestAwsS3Service.java | 29 +-- 20 files changed, 821 insertions(+), 439 deletions(-) create mode 100644 plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java delete mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index ae971cfe4e1ec..e7fd7823a4d83 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -55,7 +55,7 @@ bundlePlugin { } additionalTest('testRepositoryCreds'){ - include '**/RepositorySettingsCredentialsTests.class' + include '**/RepositoryCredentialsTests.class' systemProperty 'es.allow_insecure_settings', 'true' } diff --git 
a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java new file mode 100644 index 0000000000000..6734fcfb56df5 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; + +import org.elasticsearch.common.lease.Releasable; + +/** + * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference + * counting. + */ +public class AmazonS3Reference extends AbstractRefCounted implements Releasable { + + private final AmazonS3 client; + + AmazonS3Reference(AmazonS3 client) { + super("AWS_S3_CLIENT"); + this.client = client; + } + + /** + * Call when the client is not needed anymore. + */ + @Override + public void close() { + decRef(); + } + + /** + * Returns the underlying `AmazonS3` client. All method calls are permitted BUT + * NOT shutdown. 
Shutdown is called when reference count reaches 0. + */ + public AmazonS3 client() { + return client; + } + + @Override + protected void closeInternal() { + client.shutdown(); + } + +} \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index dbffe293a43b1..38e39747de7fa 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -19,14 +19,30 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.settings.Settings; +import java.util.Map; -interface AwsS3Service extends LifecycleComponent { +interface AwsS3Service { /** - * Creates an {@code AmazonS3} client from the given repository metadata and node settings. + * Creates then caches an {@code AmazonS3} client using the current client + * settings. */ - AmazonS3 client(Settings repositorySettings); + AmazonS3Reference client(String clientName); + + /** + * Updates settings for building clients. Future client requests will use the + * new settings. Implementations SHOULD drop the client cache to prevent reusing + * clients with old settings from cache. + * + * @param clientsSettings + * the new settings + * @return the old settings + */ + Map updateClientsSettings(Map clientsSettings); + + /** + * Releases cached clients. Subsequent client requests will recreate client + * instances. Does not touch the client settings. 
+ */ + void releaseCachedClients(); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index d70ed9ea9aa8b..381b72bdf950c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -28,66 +28,88 @@ import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; + import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; import java.util.Map; -import java.util.function.Function; - - -class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service { +import static java.util.Collections.emptyMap; - // pkg private for tests - static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - private final Map clientsSettings; +class InternalAwsS3Service extends AbstractComponent implements AwsS3Service { - private final Map clientsCache = new HashMap<>(); + private volatile Map clientsCache = emptyMap(); + private volatile Map clientsSettings = emptyMap(); - InternalAwsS3Service(Settings settings, Map clientsSettings) { + InternalAwsS3Service(Settings settings) { super(settings); - this.clientsSettings = clientsSettings; } + /** + * 
Reloads the settings for the AmazonS3 client. New clients will be build using + * these. Old clients are usable until released. On release they will be + * destroyed contrary to being returned to the cache. + */ @Override - public synchronized AmazonS3 client(Settings repositorySettings) { - String clientName = CLIENT_NAME.get(repositorySettings); - AmazonS3Client client = clientsCache.get(clientName); - if (client != null) { - return client; - } + public synchronized Map updateClientsSettings(Map clientsSettings) { + // shutdown all unused clients + // others will shutdown on their respective release + releaseCachedClients(); + final Map prevSettings = this.clientsSettings; + this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + assert this.clientsSettings.containsKey("default") : "always at least have 'default'"; + // clients are built lazily by {@link client(String)} + return prevSettings; + } - S3ClientSettings clientSettings = clientsSettings.get(clientName); - if (clientSettings == null) { - throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " + - Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + /** + * Attempts to retrieve a client by name from the cache. If the client does not + * exist it will be created. + */ + @Override + public AmazonS3Reference client(String clientName) { + AmazonS3Reference clientReference = clientsCache.get(clientName); + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; } + synchronized (this) { + clientReference = clientsCache.get(clientName); + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; + } + final S3ClientSettings clientSettings = clientsSettings.get(clientName); + if (clientSettings == null) { + throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. 
Existing client configs: " + + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + } + logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); + clientReference = new AmazonS3Reference(buildClient(clientSettings)); + clientReference.incRef(); + clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, clientReference).immutableMap(); + return clientReference; + } + } - logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); - - AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); - ClientConfiguration configuration = buildConfiguration(clientSettings); - - client = new AmazonS3Client(credentials, configuration); - + private AmazonS3 buildClient(S3ClientSettings clientSettings) { + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + final ClientConfiguration configuration = buildConfiguration(clientSettings); + final AmazonS3 client = buildClient(credentials, configuration); if (Strings.hasText(clientSettings.endpoint)) { client.setEndpoint(clientSettings.endpoint); } - - clientsCache.put(clientName, client); return client; } + // proxy for testing + AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + return new AmazonS3Client(credentials, configuration); + } + // pkg private for tests static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { - ClientConfiguration clientConfiguration = new ClientConfiguration(); + final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); @@ -109,27 +131,8 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { } // pkg private for tests - static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, - S3ClientSettings clientSettings, Settings repositorySettings) { - - - BasicAWSCredentials credentials = clientSettings.credentials; - if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { - if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + - " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); - } - try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); - SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { - credentials = new BasicAWSCredentials(key.toString(), secret.toString()); - } - // backcompat for reading keys out of repository settings - deprecationLogger.deprecated("Using s3 access/secret key from repository settings. 
Instead " + - "store these in named clients and the elasticsearch keystore for secure settings."); - } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + - " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); - } + static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { + final BasicAWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); @@ -140,20 +143,15 @@ static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger } @Override - protected void doStart() throws ElasticsearchException { - } - - @Override - protected void doStop() throws ElasticsearchException { - } - - @Override - protected void doClose() throws ElasticsearchException { - for (AmazonS3Client client : clientsCache.values()) { - client.shutdown(); + public synchronized void releaseCachedClients() { + // the clients will shutdown when they will not be used anymore + for (final AmazonS3Reference clientReference : clientsCache.values()) { + clientReference.decRef(); } - - // Ensure that IdleConnectionReaper is shutdown + // clear previously cached clients, they will be build lazily + clientsCache = emptyMap(); + // shutdown IdleConnectionReaper background thread + // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } @@ -174,4 +172,5 @@ public void refresh() { SocketAccess.doPrivilegedVoid(credentials::refresh); } } + } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 401ef0933a847..04880df2c47e8 100644 --- 
a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; @@ -49,8 +48,6 @@ import java.io.InputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -72,19 +69,20 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public boolean blobExists(String blobName) { - try { - return SocketAccess.doPrivileged(() -> blobStore.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); - } catch (Exception e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); + } catch (final Exception e) { throw new BlobStoreException("Failed to check if blob [" + blobName +"] exists", e); } } @Override public InputStream readBlob(String blobName) throws IOException { - try { - S3Object s3Object = SocketAccess.doPrivileged(() -> blobStore.client().getObject(blobStore.bucket(), buildKey(blobName))); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(blobStore.bucket(), + buildKey(blobName))); return s3Object.getObjectContent(); - } catch (AmazonClientException e) { + } catch (final AmazonClientException e) { if (e instanceof 
AmazonS3Exception) { if (404 == ((AmazonS3Exception) e).getStatusCode()) { throw new NoSuchFileException("Blob object [" + blobName + "] not found: " + e.getMessage()); @@ -100,14 +98,11 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t throw new FileAlreadyExistsException("Blob [" + blobName + "] already exists, cannot overwrite"); } - SocketAccess.doPrivilegedIOException(() -> { - if (blobSize <= blobStore.bufferSizeInBytes()) { - executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); - } else { - executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize); - } - return null; - }); + if (blobSize <= blobStore.bufferSizeInBytes()) { + executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); + } else { + executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize); + } } @Override @@ -116,64 +111,64 @@ public void deleteBlob(String blobName) throws IOException { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } - try { - SocketAccess.doPrivilegedVoid(() -> blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName))); - } catch (AmazonClientException e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObject(blobStore.bucket(), buildKey(blobName))); + } catch (final AmazonClientException e) { throw new IOException("Exception when deleting blob [" + blobName + "]", e); } } @Override public Map listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { - return AccessController.doPrivileged((PrivilegedAction>) () -> { - MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - AmazonS3 client = blobStore.client(); - SocketAccess.doPrivilegedVoid(() -> { - ObjectListing prevListing = null; - while (true) { - ObjectListing list; - if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); - } else { - if 
(blobNamePrefix != null) { - list = client.listObjects(blobStore.bucket(), buildKey(blobNamePrefix)); - } else { - list = client.listObjects(blobStore.bucket(), keyPath); - } - } - for (S3ObjectSummary summary : list.getObjectSummaries()) { - String name = summary.getKey().substring(keyPath.length()); - blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize())); - } - if (list.isTruncated()) { - prevListing = list; + final MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + ObjectListing prevListing = null; + while (true) { + ObjectListing list; + if (prevListing != null) { + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); + } else { + if (blobNamePrefix != null) { + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), + buildKey(blobNamePrefix))); } else { - break; + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), keyPath)); } } - }); + for (final S3ObjectSummary summary : list.getObjectSummaries()) { + final String name = summary.getKey().substring(keyPath.length()); + blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize())); + } + if (list.isTruncated()) { + prevListing = list; + } else { + break; + } + } return blobsBuilder.immutableMap(); - }); + } catch (final AmazonClientException e) { + throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e); + } } @Override public void move(String sourceBlobName, String targetBlobName) throws IOException { - try { - CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName), - blobStore.bucket(), buildKey(targetBlobName)); - - if (blobStore.serverSideEncryption()) { - ObjectMetadata objectMetadata = new ObjectMetadata(); - 
objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - request.setNewObjectMetadata(objectMetadata); - } + final CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName), blobStore.bucket(), + buildKey(targetBlobName)); + + if (blobStore.serverSideEncryption()) { + final ObjectMetadata objectMetadata = new ObjectMetadata(); + objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + request.setNewObjectMetadata(objectMetadata); + } + try (AmazonS3Reference clientReference = blobStore.clientReference()) { SocketAccess.doPrivilegedVoid(() -> { - blobStore.client().copyObject(request); - blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName)); + clientReference.client().copyObject(request); + clientReference.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName)); }); - - } catch (AmazonS3Exception e) { + } catch (final AmazonS3Exception e) { throw new IOException(e); } } @@ -203,19 +198,20 @@ void executeSingleUpload(final S3BlobStore blobStore, throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size"); } - try { - final ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(blobSize); - if (blobStore.serverSideEncryption()) { - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - } - - final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md); - putRequest.setStorageClass(blobStore.getStorageClass()); - putRequest.setCannedAcl(blobStore.getCannedACL()); + final ObjectMetadata md = new ObjectMetadata(); + md.setContentLength(blobSize); + if (blobStore.serverSideEncryption()) { + md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + } + final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md); + putRequest.setStorageClass(blobStore.getStorageClass()); + 
putRequest.setCannedAcl(blobStore.getCannedACL()); - blobStore.client().putObject(putRequest); - } catch (AmazonClientException e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> { + clientReference.client().putObject(putRequest); + }); + } catch (final AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); } } @@ -246,23 +242,23 @@ void executeMultipartUpload(final S3BlobStore blobStore, final int nbParts = multiparts.v1().intValue(); final long lastPartSize = multiparts.v2(); - assert blobSize == (nbParts - 1) * partSize + lastPartSize : "blobSize does not match multipart sizes"; + assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes"; final SetOnce uploadId = new SetOnce<>(); final String bucketName = blobStore.bucket(); boolean success = false; - try { - final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); - initRequest.setStorageClass(blobStore.getStorageClass()); - initRequest.setCannedACL(blobStore.getCannedACL()); - if (blobStore.serverSideEncryption()) { - final ObjectMetadata md = new ObjectMetadata(); - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - initRequest.setObjectMetadata(md); - } + final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); + initRequest.setStorageClass(blobStore.getStorageClass()); + initRequest.setCannedACL(blobStore.getCannedACL()); + if (blobStore.serverSideEncryption()) { + final ObjectMetadata md = new ObjectMetadata(); + md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + initRequest.setObjectMetadata(md); + } + try (AmazonS3Reference clientReference = blobStore.clientReference()) { - uploadId.set(blobStore.client().initiateMultipartUpload(initRequest).getUploadId()); + 
uploadId.set(SocketAccess.doPrivileged(() -> clientReference.client().initiateMultipartUpload(initRequest).getUploadId())); if (Strings.isEmpty(uploadId.get())) { throw new IOException("Failed to initialize multipart upload " + blobName); } @@ -287,7 +283,7 @@ void executeMultipartUpload(final S3BlobStore blobStore, } bytesCount += uploadRequest.getPartSize(); - final UploadPartResult uploadResponse = blobStore.client().uploadPart(uploadRequest); + final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest)); parts.add(uploadResponse.getPartETag()); } @@ -296,16 +292,19 @@ void executeMultipartUpload(final S3BlobStore blobStore, + "bytes sent but got " + bytesCount); } - CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts); - blobStore.client().completeMultipartUpload(complRequest); + final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), + parts); + SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); success = true; - } catch (AmazonClientException e) { + } catch (final AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e); } finally { - if (success == false && Strings.hasLength(uploadId.get())) { + if ((success == false) && Strings.hasLength(uploadId.get())) { final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get()); - blobStore.client().abortMultipartUpload(abortRequest); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); + } } } } @@ -324,7 +323,7 @@ static Tuple numberOfMultiparts(final long totalSize, final long par throw new IllegalArgumentException("Part size 
must be greater than zero"); } - if (totalSize == 0L || totalSize <= partSize) { + if ((totalSize == 0L) || (totalSize <= partSize)) { return Tuple.tuple(1L, totalSize); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 27349f12135ed..c0d89c0f8fd01 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -19,13 +19,13 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; + import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -34,14 +34,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Locale; class S3BlobStore extends AbstractComponent implements BlobStore { - private final AmazonS3 client; + private final AwsS3Service service; + + private final String clientName; private final String bucket; @@ -53,10 +53,11 @@ class S3BlobStore extends AbstractComponent implements BlobStore { private final StorageClass storageClass; - S3BlobStore(Settings settings, AmazonS3 client, String bucket, boolean serverSideEncryption, + S3BlobStore(Settings settings, AwsS3Service service, String clientName, String bucket, boolean 
serverSideEncryption, ByteSizeValue bufferSize, String cannedACL, String storageClass) { super(settings); - this.client = client; + this.service = service; + this.clientName = clientName; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; this.bufferSize = bufferSize; @@ -68,12 +69,14 @@ class S3BlobStore extends AbstractComponent implements BlobStore { // Also, if invalid security credentials are used to execute this method, the // client is not able to distinguish between bucket permission errors and // invalid credential errors, and this method could return an incorrect result. - SocketAccess.doPrivilegedVoid(() -> { - if (client.doesBucketExist(bucket) == false) { - throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. Please create it before " + - " creating an s3 snapshot repository backed by it."); - } - }); + try (AmazonS3Reference clientReference = clientReference()) { + SocketAccess.doPrivilegedVoid(() -> { + if (clientReference.client().doesBucketExist(bucket) == false) { + throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. 
Please create it before " + + " creating an s3 snapshot repository backed by it."); + } + }); + } } @Override @@ -81,8 +84,8 @@ public String toString() { return bucket; } - public AmazonS3 client() { - return client; + public AmazonS3Reference clientReference() { + return service.client(clientName); } public String bucket() { @@ -104,27 +107,30 @@ public BlobContainer blobContainer(BlobPath path) { @Override public void delete(BlobPath path) { - AccessController.doPrivileged((PrivilegedAction) () -> { + try (AmazonS3Reference clientReference = clientReference()) { ObjectListing prevListing = null; - //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html - //we can do at most 1K objects per delete - //We don't know the bucket name until first object listing + // From + // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html + // we can do at most 1K objects per delete + // We don't know the bucket name until first object listing DeleteObjectsRequest multiObjectDeleteRequest = null; - ArrayList keys = new ArrayList<>(); + final ArrayList keys = new ArrayList<>(); while (true) { ObjectListing list; if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); } else { - list = client.listObjects(bucket, path.buildAsString()); + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(bucket, path.buildAsString())); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } - for (S3ObjectSummary summary : list.getObjectSummaries()) { + for (final S3ObjectSummary summary : list.getObjectSummaries()) { keys.add(new KeyVersion(summary.getKey())); - //Every 500 objects batch the delete request + // Every 500 objects batch the delete request if (keys.size() > 500) { 
multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } @@ -137,14 +143,15 @@ public void delete(BlobPath path) { } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); } - return null; - }); + } } @Override public void close() { + this.service.releaseCachedClients(); } public CannedAccessControlList getCannedACL() { @@ -154,18 +161,18 @@ public CannedAccessControlList getCannedACL() { public StorageClass getStorageClass() { return storageClass; } public static StorageClass initStorageClass(String storageClass) { - if (storageClass == null || storageClass.equals("")) { + if ((storageClass == null) || storageClass.equals("")) { return StorageClass.Standard; } try { - StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); + final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); if (_storageClass.equals(StorageClass.Glacier)) { throw new BlobStoreException("Glacier storage class is not supported"); } return _storageClass; - } catch (IllegalArgumentException illegalArgumentException) { + } catch (final IllegalArgumentException illegalArgumentException) { throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class."); } } @@ -174,11 +181,11 @@ public static StorageClass initStorageClass(String storageClass) { * Constructs canned acl from string */ public static CannedAccessControlList 
initCannedACL(String cannedACL) { - if (cannedACL == null || cannedACL.equals("")) { + if ((cannedACL == null) || cannedACL.equals("")) { return CannedAccessControlList.Private; } - for (CannedAccessControlList cur : CannedAccessControlList.values()) { + for (final CannedAccessControlList cur : CannedAccessControlList.values()) { if (cur.toString().equalsIgnoreCase(cannedACL)) { return cur; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 4d32d2518fff1..e4d0d98a4e546 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -24,10 +24,11 @@ import java.util.Locale; import java.util.Map; import java.util.Set; - import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; import com.amazonaws.auth.BasicAWSCredentials; + +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -119,7 +120,7 @@ class S3ClientSettings { /** Whether the s3 client should use an exponential backoff retry policy. */ final boolean throttleRetries; - private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, + protected S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis, int maxRetries, boolean throttleRetries) { this.credentials = credentials; @@ -140,9 +141,9 @@ private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Proto * Note this will always at least return a client named "default". 
*/ static Map load(Settings settings) { - Set clientNames = settings.getGroups(PREFIX).keySet(); - Map clients = new HashMap<>(); - for (String clientName : clientNames) { + final Set clientNames = settings.getGroups(PREFIX).keySet(); + final Map clients = new HashMap<>(); + for (final String clientName : clientNames) { clients.put(clientName, getClientSettings(settings, clientName)); } if (clients.containsKey("default") == false) { @@ -153,23 +154,64 @@ static Map load(Settings settings) { return Collections.unmodifiableMap(clients); } - // pkg private for tests - /** Parse settings for a single client. */ - static S3ClientSettings getClientSettings(Settings settings, String clientName) { + static Map overrideCredentials(Map clientsSettings, + BasicAWSCredentials credentials) { + final MapBuilder mapBuilder = new MapBuilder<>(); + for (final Map.Entry entry : clientsSettings.entrySet()) { + final S3ClientSettings s3ClientSettings = new S3ClientSettings(credentials, entry.getValue().endpoint, + entry.getValue().protocol, entry.getValue().proxyHost, entry.getValue().proxyPort, entry.getValue().proxyUsername, + entry.getValue().proxyPassword, entry.getValue().readTimeoutMillis, entry.getValue().maxRetries, + entry.getValue().throttleRetries); + mapBuilder.put(entry.getKey(), s3ClientSettings); + } + return mapBuilder.immutableMap(); + } + + static boolean checkDeprecatedCredentials(Settings repositorySettings) { + if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { + if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + + " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); + } + return true; + } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + + " must be accompanied by 
setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); + } + return false; + } + + // backcompat for reading keys out of repository settings (clusterState) + static BasicAWSCredentials loadDeprecatedCredentials(Settings repositorySettings) { + assert checkDeprecatedCredentials(repositorySettings); + try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { + return new BasicAWSCredentials(key.toString(), secret.toString()); + } + } + + static BasicAWSCredentials loadCredentials(Settings settings, String clientName) { try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); - SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); - SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { - BasicAWSCredentials credentials = null; + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);) { if (accessKey.length() != 0) { if (secretKey.length() != 0) { - credentials = new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + return new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); } else { throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); } } else if (secretKey.length() != 0) { throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]"); } + return null; + } + } + + // pkg private for tests + /** Parse settings for a single client. 
*/ + static S3ClientSettings getClientSettings(Settings settings, String clientName) { + final BasicAWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName); + try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { return new S3ClientSettings( credentials, getConfigValue(settings, clientName, ENDPOINT_SETTING), @@ -187,7 +229,7 @@ static S3ClientSettings getClientSettings(Settings settings, String clientName) private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 51bb6f2024cd4..efd1e9481afc8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,7 +19,8 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.auth.BasicAWSCredentials; + import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -36,6 +37,8 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import java.io.IOException; +import java.util.Map; +import java.util.function.Function; /** * Shared file system implementation of the BlobStoreRepository @@ -135,6 +138,8 @@ class S3Repository extends BlobStoreRepository { */ static final Setting CANNED_ACL_SETTING = 
Setting.simpleString("canned_acl"); + static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); + /** * Specifies the path within bucket to repository data. Defaults to root directory. */ @@ -144,24 +149,24 @@ class S3Repository extends BlobStoreRepository { private final BlobPath basePath; - private ByteSizeValue chunkSize; + private final ByteSizeValue chunkSize; - private boolean compress; + private final boolean compress; /** * Constructs an s3 backed repository */ - S3Repository(RepositoryMetaData metadata, Settings settings, - NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException { + S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, + AwsS3Service awsService) throws IOException { super(metadata, settings, namedXContentRegistry); - String bucket = BUCKET_SETTING.get(metadata.settings()); + final String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway"); } - boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = COMPRESS_SETTING.get(metadata.settings()); @@ -172,17 +177,28 @@ class S3Repository extends BlobStoreRepository { } // Parse and validate the user's S3 Storage Class setting - String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + final String 
clientName = CLIENT_NAME.get(metadata.settings()); logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - AmazonS3 client = s3Service.client(metadata.settings()); - blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + // deprecated behavior: override client credentials from the cluster state + // (repository settings) + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); + // hack, but that's ok because the whole if branch should be axed + final Map prevSettings = awsService.updateClientsSettings(S3ClientSettings.load(Settings.EMPTY)); + final Map newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials); + awsService.updateClientsSettings(newSettings); + } + blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); - String basePath = BASE_PATH_SETTING.get(metadata.settings()); + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); if (Strings.hasLength(basePath)) { this.basePath = new BlobPath().add(basePath); } else { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 573e36975dc4e..0d652a93c361a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.s3; +import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.Arrays; @@ -28,6 +29,7 @@ import com.amazonaws.util.json.Jackson; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -58,23 +60,33 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReIn }); } - private final Map clientsSettings; + private final AwsS3Service awsS3Service; public S3RepositoryPlugin(Settings settings) { + this.awsS3Service = getAwsS3Service(settings); // eagerly load client settings so that secure settings are read - clientsSettings = S3ClientSettings.load(settings); - assert clientsSettings.isEmpty() == false : "always at least have 'default'"; + final Map clientsSettings = S3ClientSettings.load(settings); + this.awsS3Service.updateClientsSettings(clientsSettings); } - // overridable for tests - protected AwsS3Service createStorageService(Settings settings) { - return new InternalAwsS3Service(settings, clientsSettings); + protected S3RepositoryPlugin(AwsS3Service awsS3Service) { + this.awsS3Service = awsS3Service; + } + + // proxy method for testing + protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) + throws IOException { + return new S3Repository(metadata, settings, namedXContentRegistry, awsS3Service); + } + + // proxy method for testing + protected AwsS3Service getAwsS3Service(Settings settings) { + return new InternalAwsS3Service(settings); } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { 
- return Collections.singletonMap(S3Repository.TYPE, - (metadata) -> new S3Repository(metadata, env.settings(), namedXContentRegistry, createStorageService(env.settings()))); + return Collections.singletonMap(S3Repository.TYPE, (metadata) -> getS3Repository(metadata, env.settings(), namedXContentRegistry)); } @Override @@ -96,7 +108,14 @@ public List> getSettings() { @Override public boolean reinit(Settings settings) { - // TODO clientSettings - return false; + // secure settings should be readable + final Map clientsSettings = S3ClientSettings.load(settings); + awsS3Service.updateClientsSettings(clientsSettings); + return true; + } + + @Override + public void close() { + awsS3Service.releaseCachedClients(); } } diff --git a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy index d8fca1fc89938..5fd69b4c2fc3f 100644 --- a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy @@ -37,4 +37,7 @@ grant { // s3 client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; + + // only for tests : org.elasticsearch.repositories.s3.S3RepositoryPlugin + permission java.util.PropertyPermission "es.allow_insecure_settings", "read,write"; }; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index b40dc75c83701..dd829ee90c12f 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; import 
com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; @@ -180,13 +179,13 @@ public void testEncryption() { Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); - AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings); - - String bucketName = bucket.get("bucket"); - logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); - List summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries(); - for (S3ObjectSummary summary : summaries) { - assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256")); + try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) { + String bucketName = bucket.get("bucket"); + logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); + List summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries(); + for (S3ObjectSummary summary : summaries) { + assertThat(s3Client.client().getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256")); + } } logger.info("--> delete some data"); @@ -443,8 +442,7 @@ public void cleanRepositoryFiles(String basePath) { // We check that settings has been set in elasticsearch.yml integration test file // as described in README assertThat("Your settings in elasticsearch.yml are incorrect. 
Check README file.", bucketName, notNullValue()); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY); - try { + try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html //we can do at most 1K objects per delete @@ -454,9 +452,9 @@ public void cleanRepositoryFiles(String basePath) { while (true) { ObjectListing list; if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); + list = s3Client.client().listNextBatchOfObjects(prevListing); } else { - list = client.listObjects(bucketName, basePath); + list = s3Client.client().listObjects(bucketName, basePath); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } for (S3ObjectSummary summary : list.getObjectSummaries()) { @@ -464,7 +462,7 @@ public void cleanRepositoryFiles(String basePath) { //Every 500 objects batch the delete request if (keys.size() > 500) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + s3Client.client().deleteObjects(multiObjectDeleteRequest); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } @@ -477,7 +475,7 @@ public void cleanRepositoryFiles(String basePath) { } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + s3Client.client().deleteObjects(multiObjectDeleteRequest); } } catch (Exception ex) { logger.warn((Supplier) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java index bcab130e7d531..91b364011b80a 100644 --- 
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java @@ -727,4 +727,9 @@ public BucketReplicationConfiguration getBucketReplicationConfiguration(GetBucke public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException { return delegate.headBucket(headBucketRequest); } + + @Override + public void shutdown() { + delegate.shutdown(); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 18c701f5fc1a6..6f55f3ed345df 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -21,77 +21,89 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; -import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.internal.StaticCredentialsProvider; + import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import java.util.Locale; +import java.util.Map; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class AwsS3ServiceImplTests extends ESTestCase { - public void testAWSCredentialsWithSystemProviders() { - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); - AWSCredentialsProvider credentialsProvider = - InternalAwsS3Service.buildCredentials(logger, 
deprecationLogger, clientSettings, Settings.EMPTY); + public void testAWSCredentialsDefaultToInstanceProviders() { + final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName); + final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } - public void testAwsCredsDefaultSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client.default.access_key", "aws_key"); - secureSettings.setString("s3.client.default.secret_key", "aws_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - assertCredentials(Settings.EMPTY, settings, "aws_key", "aws_secret"); - } - - public void testAwsCredsExplicitConfigSettings() { - Settings repositorySettings = Settings.builder().put(InternalAwsS3Service.CLIENT_NAME.getKey(), "myconfig").build(); - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client.myconfig.access_key", "aws_key"); - secureSettings.setString("s3.client.myconfig.secret_key", "aws_secret"); - secureSettings.setString("s3.client.default.access_key", "wrong_key"); - secureSettings.setString("s3.client.default.secret_key", "wrong_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - assertCredentials(repositorySettings, settings, "aws_key", "aws_secret"); - } - - public void testRepositorySettingsCredentialsDisallowed() { - Settings repositorySettings = Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - 
assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("Setting [access_key] is insecure")); - } - - public void testRepositorySettingsCredentialsMissingKey() { - Settings repositorySettings = Settings.builder().put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("must be accompanied by setting [access_key]")); + public void testAWSCredentialsFromKeystore() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", clientName + "_aws_secret_key"); + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key")); + assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key")); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } - public void testRepositorySettingsCredentialsMissingSecret() { - Settings repositorySettings = Settings.builder().put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("must be accompanied by setting [secret_key]")); + public void testSetDefaultCredential() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String awsAccessKey = randomAlphaOfLength(8); + final String awsSecretKey = randomAlphaOfLength(8); + 
secureSettings.setString("s3.client.default.access_key", awsAccessKey); + secureSettings.setString("s3.client.default.secret_key", awsSecretKey); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + assertThat(allClientsSettings.size(), is(1)); + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey)); + assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey)); } - private void assertCredentials(Settings singleRepositorySettings, Settings settings, - String expectedKey, String expectedSecret) { - String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, - clientSettings, singleRepositorySettings).getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); - assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); + public void testCredentialsIncomplete() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final boolean missingOrMissing = randomBoolean(); + if (missingOrMissing) { + secureSettings.setString("s3.client." + clientName + ".access_key", "aws_access_key"); + } else { + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", "aws_secret_key"); + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Exception e = expectThrows(IllegalArgumentException.class, () -> S3ClientSettings.load(settings)); + if (missingOrMissing) { + assertThat(e.getMessage(), containsString("Missing secret key for s3 client [" + clientName + "]")); + } else { + assertThat(e.getMessage(), containsString("Missing access key for s3 client [" + clientName + "]")); + } } public void testAWSDefaultConfiguration() { @@ -100,10 +112,10 @@ public void testAWSDefaultConfiguration() { } public void testAWSConfigurationWithAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.proxy.username", "aws_proxy_username"); secureSettings.setString("s3.client.default.proxy.password", "aws_proxy_password"); - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(secureSettings) .put("s3.client.default.protocol", "http") .put("s3.client.default.proxy.host", "aws_proxy_host") @@ -115,7 +127,7 @@ public void testAWSConfigurationWithAwsSettings() { } public void testRepositoryMaxRetries() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("s3.client.default.max_retries", 5) .build(); launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, @@ -125,7 +137,7 @@ public void testRepositoryMaxRetries() { public void testRepositoryThrottleRetries() { final boolean throttling = randomBoolean(); - Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); + final Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); } @@ -139,8 +151,8 
@@ private void launchAWSConfigurationTest(Settings settings, boolean expectedUseThrottleRetries, int expectedReadTimeout) { - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); + final ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -154,15 +166,15 @@ private void launchAWSConfigurationTest(Settings settings, } public void testEndpointSetting() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("s3.client.default.endpoint", "s3.endpoint") .build(); assertEndpoint(Settings.EMPTY, settings, "s3.endpoint"); } private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) { - String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings); - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); + final String configName = S3Repository.CLIENT_NAME.get(repositorySettings); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); assertThat(clientSettings.endpoint, is(expectedEndpoint)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index a090fdd5281fd..f6d39e4d4d0bd 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -208,6 +208,11 @@ public void deleteObject(DeleteObjectRequest deleteObjectRequest) 
blobs.remove(blobName); } + + @Override + public void shutdown() { + // TODO check close + } private int getSize(InputStream stream) throws IOException { int size = stream.read(byteCounter); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java new file mode 100644 index 0000000000000..c42403503e0c8 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.s3; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.s3.AmazonS3; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import static org.hamcrest.Matchers.is; + +@SuppressForbidden(reason = "test fixture requires System.setProperty") +public class RepositoryCredentialsTests extends ESTestCase { + + static { + AccessController.doPrivileged((PrivilegedAction) () -> { + // required for client settings overwriting + System.setProperty("es.allow_insecure_settings", "true"); + return null; + }); + } + + static final class ProxyS3RepositoryPlugin extends S3RepositoryPlugin { + + static final class ClientAndCredentials extends AmazonS3Wrapper { + final AWSCredentialsProvider credentials; + + ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) { + super(delegate); + this.credentials = credentials; + } + + @Override + public boolean doesBucketExist(String bucketName) { + return true; + } + } + + static final class ProxyInternalAwsS3Service extends InternalAwsS3Service { + + ProxyInternalAwsS3Service(Settings settings) { + super(settings); + } + + @Override + AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + final AmazonS3 client = super.buildClient(credentials, configuration); + return new ClientAndCredentials(client, credentials); + } + + } + + protected ProxyS3RepositoryPlugin(Settings settings) { + super(settings); + } + + @Override + protected 
AwsS3Service getAwsS3Service(Settings settings) { + return new ProxyInternalAwsS3Service(settings); + } + + } + + public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException { + final int clientsCount = randomIntBetween(0, 4); + final String[] clientNames = new String[clientsCount + 1]; + clientNames[0] = "default"; + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", "secure_aws_key"); + secureSettings.setString("s3.client.default.secret_key", "secure_aws_secret"); + for (int i = 0; i < clientsCount; i++) { + final String clientName = "client_" + i; + secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key_" + i); + secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret_" + i); + clientNames[i + 1] = clientName; + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + // repository settings for credentials override node secure settings + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() + .put(S3Repository.CLIENT_NAME.getKey(), randomFrom(clientNames)) + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be 
removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version."); + } + + public void testRepositoryCredentialsOnly() throws IOException { + // repository settings for credentials override node secure settings + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", + Settings.builder() + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") + .build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" 
+ + " See the breaking changes documentation for the next major version."); + } + + public void testReinitSecureCredentials() throws IOException { + final String clientName = randomFrom("default", "some_client"); + // initial client node settings + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key"); + secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + // repository settings + final Settings.Builder builder = Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName); + final boolean repositorySettings = randomBoolean(); + if (repositorySettings) { + builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key"); + builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret"); + } + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials + .getCredentials(); + if (repositorySettings) { + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); + } + // new settings + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString("s3.client." 
+ clientName + ".access_key", "new_secret_aws_key"); + newSecureSettings.setString("s3.client." + clientName + ".secret_key", "new_secret_aws_secret"); + final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build(); + // reload S3 plugin settings + s3Plugin.reinit(newSettings); + // check the not-yet-closed client reference still has the same credentials + if (repositorySettings) { + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); + } + } + // check credentials have been updated + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials + .getCredentials(); + assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); + assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret")); + } + } + if (repositorySettings) { + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" 
+ + " See the breaking changes documentation for the next major version."); + } + } + +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java deleted file mode 100644 index c3e7069fdfd65..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.auth.AWSCredentials; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; - -public class RepositorySettingsCredentialsTests extends ESTestCase { - - public void testRepositorySettingsCredentials() { - Settings repositorySettings = Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, - S3ClientSettings.getClientSettings(Settings.EMPTY, "default"), repositorySettings).getCredentials(); - assertEquals("aws_key", credentials.getAWSAccessKeyId()); - assertEquals("aws_secret", credentials.getAWSSecretKey()); - assertSettingDeprecationsAndWarnings(new Setting[] { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING }, - "Using s3 access/secret key from repository settings. 
" + - "Instead store these in named clients and the elasticsearch keystore for secure settings."); - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index 5998540e7a8fa..65786ebf90b49 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -64,6 +64,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doAnswer; public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { @@ -81,19 +82,25 @@ public static void openMockSocket() throws IOException { try { // Accept connections from MockAmazonS3. mockS3ServerSocket.accept(); - } catch (IOException e) { + } catch (final IOException e) { } } }); mockS3AcceptorThread.start(); } + @Override protected BlobStore newBlobStore() throws IOException { - MockAmazonS3 client = new MockAmazonS3(mockS3ServerSocket.getLocalPort()); - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - - return new S3BlobStore(Settings.EMPTY, client, bucket, false, - new ByteSizeValue(10, ByteSizeUnit.MB), "public-read-write", "standard"); + final AmazonS3Reference clientReference = new AmazonS3Reference(new MockAmazonS3(mockS3ServerSocket.getLocalPort())); + final InternalAwsS3Service awsService = mock(InternalAwsS3Service.class); + doAnswer(invocation -> { + clientReference.incRef(); + return clientReference; + }).when(awsService).client(any(String.class)); + final String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + + return new S3BlobStore(Settings.EMPTY, awsService, "default", bucket, false, new ByteSizeValue(10, 
ByteSizeUnit.MB), + "public-read-write", "standard"); } public void testExecuteSingleUploadBlobSizeTooLarge() throws IOException { @@ -101,7 +108,7 @@ public void testExecuteSingleUploadBlobSizeTooLarge() throws IOException { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize)); assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } @@ -113,7 +120,7 @@ public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() throws IOExcep final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); final String blobName = randomAlphaOfLengthBetween(1, 10); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2))); assertEquals("Upload request size [2097152] can't be larger than buffer size", e.getMessage()); } @@ -148,7 +155,8 @@ public void testExecuteSingleUpload() throws IOException { } final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); final ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult()); @@ -173,7 +181,7 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() throws IOException { final S3BlobStore blobStore = 
mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); @@ -184,7 +192,7 @@ public void testExecuteMultipartUploadBlobSizeTooSmall() throws IOException { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage()); @@ -218,7 +226,8 @@ public void testExecuteMultipartUpload() throws IOException { } final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); final ArgumentCaptor initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class); final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); @@ -228,7 +237,7 @@ public void testExecuteMultipartUpload() throws IOException { final ArgumentCaptor uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class); final List expectedEtags = new ArrayList<>(); - long partSize = Math.min(bufferSize, blobSize); + final long partSize = Math.min(bufferSize, blobSize); long totalBytes = 0; do { 
expectedEtags.add(randomAlphaOfLength(50)); @@ -265,7 +274,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(numberOfParts.v1().intValue(), uploadRequests.size()); for (int i = 0; i < uploadRequests.size(); i++) { - UploadPartRequest uploadRequest = uploadRequests.get(i); + final UploadPartRequest uploadRequest = uploadRequests.get(i); assertEquals(bucketName, uploadRequest.getBucketName()); assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey()); @@ -287,7 +296,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey()); assertEquals(initResult.getUploadId(), compRequest.getUploadId()); - List actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList()); + final List actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList()); assertEquals(expectedEtags, actualETags); } @@ -305,7 +314,11 @@ public void testExecuteMultipartUploadAborted() throws IOException { when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values())); final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + doAnswer(invocation -> { + clientReference.incRef(); + return clientReference; + }).when(blobStore).clientReference(); final String uploadId = randomAlphaOfLength(25); @@ -387,7 +400,7 @@ public void testExecuteMultipartUploadAborted() throws IOException { } public void testNumberOfMultipartsWithZeroPartSize() { - IllegalArgumentException e = + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> S3BlobContainer.numberOfMultiparts(randomNonNegativeLong(), 0L)); assertEquals("Part size must be greater than zero", e.getMessage()); } @@ -409,7 +422,7 @@ public void testNumberOfMultiparts() { // Fits in N parts plus a bit more 
final long remaining = randomIntBetween(1, (size > Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) size - 1); - assertNumberOfMultiparts(factor + 1, remaining, size * factor + remaining, size); + assertNumberOfMultiparts(factor + 1, remaining, (size * factor) + remaining, size); } private static void assertNumberOfMultiparts(final int expectedParts, final long expectedRemaining, long totalSize, long partSize) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 93508f11c097a..7c4505f2a8376 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.services.s3.AbstractAmazonS3; -import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -31,18 +30,25 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; - import java.io.IOException; +import java.util.Collections; +import java.util.Map; import static org.hamcrest.Matchers.containsString; public class S3RepositoryTests extends ESTestCase { private static class DummyS3Client extends AbstractAmazonS3 { + @Override public boolean doesBucketExist(String bucketName) { return true; } + + @Override + public void shutdown() { + // TODO check is closed + } } private static class DummyS3Service extends AbstractLifecycleComponent implements AwsS3Service { @@ -56,53 +62,69 @@ protected void doStop() {} @Override protected void doClose() {} @Override - public AmazonS3 client(Settings settings) { - return new DummyS3Client(); + public AmazonS3Reference 
client(String clientName) { + return new AmazonS3Reference(new DummyS3Client()); + } + + @Override + public Map updateClientsSettings(Map clientsSettings) { + return Collections.emptyMap(); + } + + @Override + public void releaseCachedClients() { } } public void testInvalidChunkBufferSizeSettings() throws IOException { // chunk < buffer should fail - assertInvalidBuffer(10, 5, RepositoryException.class, "chunk_size (5mb) can't be lower than buffer_size (10mb)."); + final Settings s1 = bufferAndChunkSettings(10, 5); + final Exception e1 = expectThrows(RepositoryException.class, + () -> new S3Repository(getRepositoryMetaData(s1), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())); + assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)")); // chunk > buffer should pass - assertValidBuffer(5, 10); + final Settings s2 = bufferAndChunkSettings(5, 10); + new S3Repository(getRepositoryMetaData(s2), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close(); // chunk = buffer should pass - assertValidBuffer(5, 5); + final Settings s3 = bufferAndChunkSettings(5, 5); + new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close(); // buffer < 5mb should fail - assertInvalidBuffer(4, 10, IllegalArgumentException.class, - "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb"); + final Settings s4 = bufferAndChunkSettings(4, 10); + final Exception e4 = expectThrows(IllegalArgumentException.class, + () -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())); + assertThat(e4.getMessage(), containsString("Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb")); // chunk > 5tb should fail - assertInvalidBuffer(5, 6000000, IllegalArgumentException.class, - "Failed to parse value [6000000mb] for setting [chunk_size] must be <= 5tb"); + final Settings s5 = 
bufferAndChunkSettings(5, 6000000); + final Exception e5 = expectThrows(IllegalArgumentException.class, + () -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())); + assertThat(e5.getMessage(), containsString("Failed to parse value [6000000mb] for setting [chunk_size] must be <= 5tb")); } - private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException { - RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() - .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep()) - .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build()); - new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()); + private Settings bufferAndChunkSettings(long buffer, long chunk) { + return Settings.builder() + .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep()) + .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep()) + .build(); } - private void assertInvalidBuffer(int bufferMB, int chunkMB, Class clazz, String msg) throws IOException { - RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() - .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep()) - .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build()); - - Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, - new DummyS3Service())); - assertThat(e.getMessage(), containsString(msg)); + private RepositoryMetaData getRepositoryMetaData(Settings settings) { + return new RepositoryMetaData("dummy-repo", "mock", Settings.builder().put(settings).build()); } public void 
testBasePathSetting() throws IOException { - RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); - S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()); - assertEquals("foo/bar/", s3repo.basePath().buildAsString()); + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() + .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); + try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + assertEquals("foo/bar/", s3repo.basePath().buildAsString()); + } } - public void testDefaultBufferSize() { - ByteSizeValue defaultBufferSize = S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY); - assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB))); - assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB))); + public void testDefaultBufferSize() throws IOException { + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes(); + assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); + assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); + } } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java index 93bf58cc28964..0c762659a5fe0 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java +++ 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java @@ -51,7 +51,7 @@ public class TestAmazonS3 extends AmazonS3Wrapper { private double writeFailureRate = 0.0; private double readFailureRate = 0.0; - private String randomPrefix; + private final String randomPrefix; ConcurrentMap accessCounts = new ConcurrentHashMap<>(); @@ -76,18 +76,18 @@ public TestAmazonS3(AmazonS3 delegate, Settings settings) { @Override public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException { if (shouldFail(bucketName, key, writeFailureRate)) { - long length = metadata.getContentLength(); - long partToRead = (long) (length * randomDouble()); - byte[] buffer = new byte[1024]; + final long length = metadata.getContentLength(); + final long partToRead = (long) (length * randomDouble()); + final byte[] buffer = new byte[1024]; for (long cur = 0; cur < partToRead; cur += buffer.length) { try { - input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur)); - } catch (IOException ex) { + input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? 
buffer.length : partToRead - cur)); + } catch (final IOException ex) { throw new ElasticsearchException("cannot read input stream", ex); } } logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception"); ex.setStatusCode(400); ex.setErrorCode("RequestTimeout"); throw ex; @@ -99,18 +99,18 @@ public PutObjectResult putObject(String bucketName, String key, InputStream inpu @Override public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException { if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) { - long length = request.getPartSize(); - long partToRead = (long) (length * randomDouble()); - byte[] buffer = new byte[1024]; + final long length = request.getPartSize(); + final long partToRead = (long) (length * randomDouble()); + final byte[] buffer = new byte[1024]; for (long cur = 0; cur < partToRead; cur += buffer.length) { try (InputStream input = request.getInputStream()){ - input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur)); - } catch (IOException ex) { + input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? 
buffer.length : partToRead - cur)); + } catch (final IOException ex) { throw new ElasticsearchException("cannot read input stream", ex); } } logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey()); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception"); ex.setStatusCode(400); ex.setErrorCode("RequestTimeout"); throw ex; @@ -123,7 +123,7 @@ public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClien public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException { if (shouldFail(bucketName, key, readFailureRate)) { logger.info("--> random read failure on getObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception"); ex.setStatusCode(404); throw ex; } else { @@ -135,7 +135,7 @@ private boolean shouldFail(String bucketName, String key, double probability) { if (probability > 0.0) { String path = randomPrefix + "-" + bucketName + "+" + key; path += "/" + incrementAndGet(path); - return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability; + return Math.abs(hashCode(path)) < (Integer.MAX_VALUE * probability); } else { return false; } @@ -143,14 +143,14 @@ private boolean shouldFail(String bucketName, String key, double probability) { private int hashCode(String path) { try { - MessageDigest digest = MessageDigest.getInstance("MD5"); - byte[] bytes = digest.digest(path.getBytes("UTF-8")); + final MessageDigest digest = MessageDigest.getInstance("MD5"); + final byte[] bytes = digest.digest(path.getBytes("UTF-8")); int i = 0; return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16) | ((bytes[i++] & 0xFF) << 8) | 
(bytes[i++] & 0xFF); - } catch (UnsupportedEncodingException ex) { + } catch (final UnsupportedEncodingException ex) { throw new ElasticsearchException("cannot calculate hashcode", ex); - } catch (NoSuchAlgorithmException ex) { + } catch (final NoSuchAlgorithmException ex) { throw new ElasticsearchException("cannot calculate hashcode", ex); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java index 522ca06614c00..85a11d722cbe7 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java @@ -22,46 +22,39 @@ import java.util.IdentityHashMap; import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; public class TestAwsS3Service extends InternalAwsS3Service { public static class TestPlugin extends S3RepositoryPlugin { public TestPlugin(Settings settings) { - super(settings); - } - @Override - protected AwsS3Service createStorageService(Settings settings) { - return new TestAwsS3Service(settings); + super(new TestAwsS3Service(settings)); } } - IdentityHashMap clients = new IdentityHashMap<>(); + IdentityHashMap clients = new IdentityHashMap<>(); public TestAwsS3Service(Settings settings) { - super(settings, S3ClientSettings.load(settings)); + super(settings); } @Override - public synchronized AmazonS3 client(Settings repositorySettings) { - return cachedWrapper(super.client(repositorySettings)); + public synchronized AmazonS3Reference client(String clientName) { + return new AmazonS3Reference(cachedWrapper(super.client(clientName))); } - private AmazonS3 cachedWrapper(AmazonS3 client) { - TestAmazonS3 wrapper = clients.get(client); + 
private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) { + TestAmazonS3 wrapper = clients.get(clientReference); if (wrapper == null) { - wrapper = new TestAmazonS3(client, settings); - clients.put(client, wrapper); + wrapper = new TestAmazonS3(clientReference.client(), settings); + clients.put(clientReference, wrapper); } return wrapper; } @Override - protected synchronized void doClose() throws ElasticsearchException { - super.doClose(); + public synchronized void releaseCachedClients() { + super.releaseCachedClients(); clients.clear(); } - } From d447a4eef3161b7eaa8f48f441c0ed1d4eb41920 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 27 Mar 2018 11:14:32 +0300 Subject: [PATCH 05/21] Update ec2 secure settings (#29134) --- .../discovery/ec2/AmazonEc2Reference.java | 61 +++++++ .../discovery/ec2/AwsEc2Service.java | 79 ++++----- .../discovery/ec2/AwsEc2ServiceImpl.java | 156 ++++++++++-------- .../ec2/AwsEc2UnicastHostsProvider.java | 43 +++-- .../discovery/ec2/Ec2ClientSettings.java | 145 ++++++++++++++++ .../discovery/ec2/Ec2DiscoveryPlugin.java | 70 ++++---- ...{AmazonEC2Mock.java => AmazonEc2Mock.java} | 19 ++- .../discovery/ec2/AwsEc2ServiceImplTests.java | 33 ++-- .../discovery/ec2/AwsEc2ServiceMock.java | 33 +--- .../Ec2DiscoveryClusterFormationTests.java | 6 +- .../discovery/ec2/Ec2DiscoveryPluginMock.java | 38 +++++ .../ec2/Ec2DiscoveryPluginTests.java | 93 ++++++++++- .../discovery/ec2/Ec2DiscoveryTests.java | 66 ++++---- .../repositories/s3/S3ClientSettings.java | 2 +- 14 files changed, 571 insertions(+), 273 deletions(-) create mode 100644 plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java create mode 100644 plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java rename plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/{AmazonEC2Mock.java => AmazonEc2Mock.java} (99%) create mode 100644 
plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java new file mode 100644 index 0000000000000..9462738a539f4 --- /dev/null +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.ec2; + +import com.amazonaws.services.ec2.AmazonEC2; + +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; + +/** + * Handles the shutdown of the wrapped {@link AmazonEC2} client using reference + * counting. + */ +public class AmazonEc2Reference extends AbstractRefCounted implements Releasable { + + private final AmazonEC2 client; + + AmazonEc2Reference(AmazonEC2 client) { + super("AWS_EC2_CLIENT"); + this.client = client; + } + + /** + * Call when the client is not needed anymore. + */ + @Override + public void close() { + decRef(); + } + + /** + * Returns the underlying `AmazonEC2` client.
All method calls are permitted BUT + * NOT shutdown. Shutdown is called when reference count reaches 0. + */ + public AmazonEC2 client() { + return client; + } + + @Override + protected void closeInternal() { + client.shutdown(); + } + +} diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java index 880be6c037323..9765ce6e1bdfc 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java @@ -19,20 +19,12 @@ package org.elasticsearch.discovery.ec2; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import com.amazonaws.services.ec2.AmazonEC2; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; - import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.function.Function; interface AwsEc2Service { @@ -46,36 +38,6 @@ class HostType { public static final String TAG_PREFIX = "tag:"; } - /** The access key (ie login id) for connecting to ec2. */ - Setting ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null); - - /** The secret key (ie password) for connecting to ec2. */ - Setting SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null); - - /** An override for the ec2 endpoint to connect to. */ - Setting ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "", - s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); - - /** The protocol to use to connect to to ec2. 
*/ - Setting PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https", - s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); - - /** The host name of a proxy to connect to ec2 through. */ - Setting PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope); - - /** The port of a proxy to connect to ec2 through. */ - Setting PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1<<16, Property.NodeScope); - - /** The username of a proxy to connect to s3 through. */ - Setting PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null); - - /** The password of a proxy to connect to s3 through. */ - Setting PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null); - - /** The socket timeout for connecting to s3. */ - Setting READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", - TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); - /** * discovery.ec2.host_type: The type of host type to use to communicate with other instances. * Can be one of private_ip, public_ip, private_dns, public_dns or tag:XXXX where @@ -88,26 +50,24 @@ class HostType { * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the * discovery. Defaults to true. */ - Setting ANY_GROUP_SETTING = - Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope); + Setting ANY_GROUP_SETTING = Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope); /** * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.) 
*/ - Setting> GROUPS_SETTING = - Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), Property.NodeScope); + Setting> GROUPS_SETTING = Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), + Property.NodeScope); /** * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within * the provided availability zones will be used in the cluster discovery. */ - Setting> AVAILABILITY_ZONES_SETTING = - Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), - Property.NodeScope); + Setting> AVAILABILITY_ZONES_SETTING = Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), + s -> s.toString(), Property.NodeScope); /** * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s. */ - Setting NODE_CACHE_TIME_SETTING = - Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), Property.NodeScope); + Setting NODE_CACHE_TIME_SETTING = Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), + Property.NodeScope); /** * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups). @@ -116,7 +76,28 @@ class HostType { * instance to be included. */ Setting.AffixSetting> TAG_SETTING = Setting.prefixKeySetting("discovery.ec2.tag.", - key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)); + key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)); + + /** + * Creates then caches an {@code AmazonEC2} client using the current client + * settings. + */ + AmazonEc2Reference client(); - AmazonEC2 client(); + /** + * Updates settings for building the client. Future client requests will use the + * new settings. 
Implementations SHOULD drop the client cache to prevent reusing + * the client with old settings from cache. + * + * @param clientSettings + * the new settings + * @return the old settings + */ + Ec2ClientSettings updateClientSettings(Ec2ClientSettings clientSettings); + + /** + * Releases the cached client. Subsequent client requests will recreate the + * client instance. Does not touch the client settings. + */ + void releaseCachedClient(); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 3b5b955260e6d..8d31ac213534e 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -19,12 +19,8 @@ package org.elasticsearch.discovery.ec2; -import java.io.Closeable; -import java.io.IOException; import java.util.Random; -import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; @@ -38,110 +34,124 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Closeable { +class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; - private AmazonEC2Client client; + private volatile AmazonEc2Reference clientReference; + private volatile Ec2ClientSettings clientSettings; AwsEc2ServiceImpl(Settings settings) { super(settings); } - @Override - 
public synchronized AmazonEC2 client() { - if (client != null) { - return client; - } - - this.client = new AmazonEC2Client(buildCredentials(logger, settings), buildConfiguration(logger, settings)); - String endpoint = findEndpoint(logger, settings); - if (endpoint != null) { - client.setEndpoint(endpoint); + private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) { + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + final ClientConfiguration configuration = buildConfiguration(logger, clientSettings); + final AmazonEC2 client = buildClient(credentials, configuration); + if (Strings.hasText(clientSettings.endpoint)) { + logger.debug("using explicit ec2 endpoint [{}]", clientSettings.endpoint); + client.setEndpoint(clientSettings.endpoint); } - - return this.client; + return client; } - protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) { - AWSCredentialsProvider credentials; - - try (SecureString key = ACCESS_KEY_SETTING.get(settings); - SecureString secret = SECRET_KEY_SETTING.get(settings)) { - if (key.length() == 0 && secret.length() == 0) { - logger.debug("Using either environment variables, system properties or instance profile credentials"); - credentials = new DefaultAWSCredentialsProviderChain(); - } else { - logger.debug("Using basic key/secret credentials"); - credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString())); - } - } - - return credentials; + // proxy for testing + AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + final AmazonEC2 client = new AmazonEC2Client(credentials, configuration); + return client; } - protected static ClientConfiguration buildConfiguration(Logger logger, Settings settings) { - ClientConfiguration clientConfiguration = new ClientConfiguration(); + // pkg private for tests + static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings 
clientSettings) { + final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. clientConfiguration.setResponseMetadataCacheSize(0); - clientConfiguration.setProtocol(PROTOCOL_SETTING.get(settings)); - - if (PROXY_HOST_SETTING.exists(settings)) { - String proxyHost = PROXY_HOST_SETTING.get(settings); - Integer proxyPort = PROXY_PORT_SETTING.get(settings); - try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings); - SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) { - - clientConfiguration - .withProxyHost(proxyHost) - .withProxyPort(proxyPort) - .withProxyUsername(proxyUsername.toString()) - .withProxyPassword(proxyPassword.toString()); - } + clientConfiguration.setProtocol(clientSettings.protocol); + if (Strings.hasText(clientSettings.proxyHost)) { + // TODO: remove this leniency, these settings should exist together and be validated + clientConfiguration.setProxyHost(clientSettings.proxyHost); + clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyUsername(clientSettings.proxyUsername); + clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } - // Increase the number of retries in case of 5xx API responses final Random rand = Randomness.get(); - RetryPolicy retryPolicy = new RetryPolicy( + final RetryPolicy retryPolicy = new RetryPolicy( RetryPolicy.RetryCondition.NO_RETRY_CONDITION, - new RetryPolicy.BackoffStrategy() { - @Override - public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest, - AmazonClientException exception, - int retriesAttempted) { - // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000) - logger.warn("EC2 API request failed, retry again. 
Reason was:", exception); - return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble())); - } + (originalRequest, exception, retriesAttempted) -> { + // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000) + logger.warn("EC2 API request failed, retry again. Reason was:", exception); + return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble())); }, 10, false); clientConfiguration.setRetryPolicy(retryPolicy); - clientConfiguration.setSocketTimeout((int) READ_TIMEOUT_SETTING.get(settings).millis()); - + clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); return clientConfiguration; } - protected static String findEndpoint(Logger logger, Settings settings) { - String endpoint = null; - if (ENDPOINT_SETTING.exists(settings)) { - endpoint = ENDPOINT_SETTING.get(settings); - logger.debug("using explicit ec2 endpoint [{}]", endpoint); + // pkg private for tests + static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) { + final BasicAWSCredentials credentials = clientSettings.credentials; + if (credentials == null) { + logger.debug("Using either environment variables, system properties or instance profile credentials"); + return new DefaultAWSCredentialsProviderChain(); + } else { + logger.debug("Using basic key/secret credentials"); + return new StaticCredentialsProvider(credentials); } - return endpoint; } @Override - public void close() throws IOException { - if (client != null) { - client.shutdown(); + public AmazonEc2Reference client() { + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; + } + synchronized (this) { + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; + } + if (clientSettings == null) { + throw new IllegalArgumentException("Missing ec2 client configs."); + } + final AmazonEc2Reference clientReference = new 
AmazonEc2Reference(buildClient(clientSettings)); + clientReference.incRef(); + this.clientReference = clientReference; + return clientReference; } + } + - // Ensure that IdleConnectionReaper is shutdown + /** + * Reloads the settings for the AmazonEC2 client. New clients will be build + * using these. Old client is usable until released. On release it will be + * destroyed instead of being returned to the cache. + */ + @Override + public synchronized Ec2ClientSettings updateClientSettings(Ec2ClientSettings clientSettings) { + // shutdown all unused clients + // others will shutdown on their respective release + releaseCachedClient(); + final Ec2ClientSettings prevSettings = this.clientSettings; + this.clientSettings = clientSettings; + return prevSettings; + } + + @Override + public synchronized void releaseCachedClient() { + if (this.clientReference == null) { + return; + } + // the client will shutdown when it will not be used anymore + this.clientReference.decRef(); + // clear the cached client, it will be build lazily + this.clientReference = null; + // shutdown IdleConnectionReaper background thread + // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index f291413d408ed..2c536981b04c5 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.AmazonClientException; -import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.DescribeInstancesRequest; import com.amazonaws.services.ec2.model.DescribeInstancesResult; import 
com.amazonaws.services.ec2.model.Filter; @@ -59,7 +58,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos private final TransportService transportService; - private final AmazonEC2 client; + private final AwsEc2Service awsEc2Service; private final boolean bindAnyGroup; @@ -76,7 +75,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) { super(settings); this.transportService = transportService; - this.client = awsEc2Service.client(); + this.awsEc2Service = awsEc2Service; this.hostType = AwsEc2Service.HOST_TYPE_SETTING.get(settings); this.discoNodes = new DiscoNodesCache(AwsEc2Service.NODE_CACHE_TIME_SETTING.get(settings)); @@ -103,31 +102,31 @@ public List buildDynamicNodes() { protected List fetchDynamicNodes() { - List discoNodes = new ArrayList<>(); + final List discoNodes = new ArrayList<>(); - DescribeInstancesResult descInstances; - try { + final DescribeInstancesResult descInstances; + try (AmazonEc2Reference clientReference = awsEc2Service.client()) { // Query EC2 API based on AZ, instance state, and tag. // NOTE: we don't filter by security group during the describe instances request for two reasons: // 1. differences in VPCs require different parameters during query (ID vs Name) // 2. We want to use two different strategies: (all security groups vs. 
any security groups) - descInstances = SocketAccess.doPrivileged(() -> client.describeInstances(buildDescribeInstancesRequest())); - } catch (AmazonClientException e) { + descInstances = SocketAccess.doPrivileged(() -> clientReference.client().describeInstances(buildDescribeInstancesRequest())); + } catch (final AmazonClientException e) { logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage()); logger.debug("Full exception:", e); return discoNodes; } logger.trace("building dynamic unicast discovery nodes..."); - for (Reservation reservation : descInstances.getReservations()) { - for (Instance instance : reservation.getInstances()) { + for (final Reservation reservation : descInstances.getReservations()) { + for (final Instance instance : reservation.getInstances()) { // lets see if we can filter based on groups if (!groups.isEmpty()) { - List instanceSecurityGroups = instance.getSecurityGroups(); - List securityGroupNames = new ArrayList<>(instanceSecurityGroups.size()); - List securityGroupIds = new ArrayList<>(instanceSecurityGroups.size()); - for (GroupIdentifier sg : instanceSecurityGroups) { + final List instanceSecurityGroups = instance.getSecurityGroups(); + final List securityGroupNames = new ArrayList<>(instanceSecurityGroups.size()); + final List securityGroupIds = new ArrayList<>(instanceSecurityGroups.size()); + for (final GroupIdentifier sg : instanceSecurityGroups) { securityGroupNames.add(sg.getGroupName()); securityGroupIds.add(sg.getGroupId()); } @@ -162,10 +161,10 @@ && disjoint(securityGroupIds, groups)) { address = instance.getPublicIpAddress(); } else if (hostType.startsWith(TAG_PREFIX)) { // Reading the node host from its metadata - String tagName = hostType.substring(TAG_PREFIX.length()); + final String tagName = hostType.substring(TAG_PREFIX.length()); logger.debug("reading hostname from [{}] instance tag", tagName); - List tags = instance.getTags(); - for (Tag tag : tags) { + final List tags = 
instance.getTags(); + for (final Tag tag : tags) { if (tag.getKey().equals(tagName)) { address = tag.getValue(); logger.debug("using [{}] as the instance address", address); @@ -177,13 +176,13 @@ && disjoint(securityGroupIds, groups)) { if (address != null) { try { // we only limit to 1 port per address, makes no sense to ping 100 ports - TransportAddress[] addresses = transportService.addressesFromString(address, 1); + final TransportAddress[] addresses = transportService.addressesFromString(address, 1); for (int i = 0; i < addresses.length; i++) { logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]); discoNodes.add(new DiscoveryNode(instance.getInstanceId(), "#cloud-" + instance.getInstanceId() + "-" + i, addresses[i], emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); } - } catch (Exception e) { + } catch (final Exception e) { final String finalAddress = address; logger.warn( (Supplier) @@ -201,12 +200,12 @@ && disjoint(securityGroupIds, groups)) { } private DescribeInstancesRequest buildDescribeInstancesRequest() { - DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest() + final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest() .withFilters( new Filter("instance-state-name").withValues("running", "pending") ); - for (Map.Entry> tagFilter : tags.entrySet()) { + for (final Map.Entry> tagFilter : tags.entrySet()) { // for a given tag key, OR relationship for multiple different values describeInstancesRequest.withFilters( new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue()) @@ -238,7 +237,7 @@ protected boolean needsRefresh() { @Override protected List refresh() { - List nodes = fetchDynamicNodes(); + final List nodes = fetchDynamicNodes(); empty = nodes.isEmpty(); return nodes; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java 
b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java new file mode 100644 index 0000000000000..b42b0d546001a --- /dev/null +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.ec2; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.BasicAWSCredentials; + +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.TimeValue; +import java.util.Locale; + +/** + * A container for settings used to create an EC2 client. + */ +final class Ec2ClientSettings { + + /** The access key (ie login id) for connecting to ec2. */ + static final Setting ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null); + + /** The secret key (ie password) for connecting to ec2. 
*/ + static final Setting SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null); + + /** The host name of a proxy to connect to ec2 through. */ + static final Setting PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope); + + /** The port of a proxy to connect to ec2 through. */ + static final Setting PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope); + + /** An override for the ec2 endpoint to connect to. */ + static final Setting ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "", s -> s.toLowerCase(Locale.ROOT), + Property.NodeScope); + + /** The protocol to use to connect to to ec2. */ + static final Setting PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https", + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); + + /** The username of a proxy to connect to s3 through. */ + static final Setting PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null); + + /** The password of a proxy to connect to s3 through. */ + static final Setting PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null); + + /** The socket timeout for connecting to s3. */ + static final Setting READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", + TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); + + /** Credentials to authenticate with ec2. */ + final BasicAWSCredentials credentials; + + /** + * The ec2 endpoint the client should talk to, or empty string to use the + * default. + */ + final String endpoint; + + /** The protocol to use to talk to ec2. Defaults to https. */ + final Protocol protocol; + + /** An optional proxy host that requests to ec2 should be made through. */ + final String proxyHost; + + /** The port number the proxy host should be connected on. 
*/ + final int proxyPort; + + // these should be "secure" yet the api for the ec2 client only takes String, so + // storing them + // as SecureString here won't really help with anything + /** An optional username for the proxy host, for basic authentication. */ + final String proxyUsername; + + /** An optional password for the proxy host, for basic authentication. */ + final String proxyPassword; + + /** The read timeout for the ec2 client. */ + final int readTimeoutMillis; + + protected Ec2ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, + String proxyUsername, String proxyPassword, int readTimeoutMillis) { + this.credentials = credentials; + this.endpoint = endpoint; + this.protocol = protocol; + this.proxyHost = proxyHost; + this.proxyPort = proxyPort; + this.proxyUsername = proxyUsername; + this.proxyPassword = proxyPassword; + this.readTimeoutMillis = readTimeoutMillis; + } + + static BasicAWSCredentials loadCredentials(Settings settings) { + try (SecureString accessKey = ACCESS_KEY_SETTING.get(settings); + SecureString secretKey = SECRET_KEY_SETTING.get(settings);) { + if (accessKey.length() != 0) { + if (secretKey.length() != 0) { + return new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + } else { + throw new IllegalArgumentException("Missing secret key for ec2 client."); + } + } else if (secretKey.length() != 0) { + throw new IllegalArgumentException("Missing access key for ec2 client."); + } + return null; + } + } + + // pkg private for tests + /** Parse settings for a single client. 
*/ + static Ec2ClientSettings getClientSettings(Settings settings) { + final BasicAWSCredentials credentials = loadCredentials(settings); + try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings); + SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) { + return new Ec2ClientSettings( + credentials, + ENDPOINT_SETTING.get(settings), + PROTOCOL_SETTING.get(settings), + PROXY_HOST_SETTING.get(settings), + PROXY_PORT_SETTING.get(settings), + proxyUsername.toString(), + proxyPassword.toString(), + (int)READ_TIMEOUT_SETTING.get(settings).millis()); + } + } + +} diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 28d563e6a9ca6..4b86263b9ad55 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -21,8 +21,6 @@ import com.amazonaws.util.json.Jackson; import org.apache.logging.log4j.Logger; -import org.elasticsearch.core.internal.io.IOUtils; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.logging.Loggers; @@ -33,10 +31,10 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReInitializablePlugin; import org.elasticsearch.transport.TransportService; import java.io.BufferedReader; -import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -52,7 +50,7 @@ import java.util.Map; import java.util.function.Supplier; -public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable { +public class Ec2DiscoveryPlugin extends Plugin implements 
DiscoveryPlugin, ReInitializablePlugin { private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); public static final String EC2 = "ec2"; @@ -68,22 +66,27 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close // ClientConfiguration clinit has some classloader problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); - } catch (ClassNotFoundException e) { + } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } return null; }); } - private Settings settings; - // stashed when created in order to properly close - private final SetOnce ec2Service = new SetOnce<>(); + private final Settings settings; + // protected for testing + protected final AwsEc2Service ec2Service; public Ec2DiscoveryPlugin(Settings settings) { - this.settings = settings; + this(settings, new AwsEc2ServiceImpl(settings)); } - + protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { + this.settings = settings; + this.ec2Service = ec2Service; + // eagerly load client settings when secure settings are accessible + reinit(settings); + } @Override public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { @@ -94,25 +97,22 @@ public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap(EC2, () -> { - ec2Service.set(new AwsEc2ServiceImpl(settings)); - return new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service.get()); - }); + return Collections.singletonMap(EC2, () -> new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service)); } @Override public List> getSettings() { return Arrays.asList( // Register EC2 discovery settings: discovery.ec2 - AwsEc2Service.ACCESS_KEY_SETTING, - AwsEc2Service.SECRET_KEY_SETTING, - AwsEc2Service.ENDPOINT_SETTING, - AwsEc2Service.PROTOCOL_SETTING, 
- AwsEc2Service.PROXY_HOST_SETTING, - AwsEc2Service.PROXY_PORT_SETTING, - AwsEc2Service.PROXY_USERNAME_SETTING, - AwsEc2Service.PROXY_PASSWORD_SETTING, - AwsEc2Service.READ_TIMEOUT_SETTING, + Ec2ClientSettings.ACCESS_KEY_SETTING, + Ec2ClientSettings.SECRET_KEY_SETTING, + Ec2ClientSettings.ENDPOINT_SETTING, + Ec2ClientSettings.PROTOCOL_SETTING, + Ec2ClientSettings.PROXY_HOST_SETTING, + Ec2ClientSettings.PROXY_PORT_SETTING, + Ec2ClientSettings.PROXY_USERNAME_SETTING, + Ec2ClientSettings.PROXY_PASSWORD_SETTING, + Ec2ClientSettings.READ_TIMEOUT_SETTING, AwsEc2Service.HOST_TYPE_SETTING, AwsEc2Service.ANY_GROUP_SETTING, AwsEc2Service.GROUPS_SETTING, @@ -125,10 +125,10 @@ public List> getSettings() { @Override public Settings additionalSettings() { - Settings.Builder builder = Settings.builder(); + final Settings.Builder builder = Settings.builder(); // Adds a node attribute for the ec2 availability zone - String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; + final String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl)); return builder.build(); } @@ -139,7 +139,7 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe if (AwsEc2Service.AUTO_ATTRIBUTE_SETTING.get(settings) == false) { return Settings.EMPTY; } - Settings.Builder attrs = Settings.builder(); + final Settings.Builder attrs = Settings.builder(); final URL url; final URLConnection urlConnection; @@ -148,7 +148,7 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe logger.debug("obtaining ec2 [placement/availability-zone] from ec2 meta-data url {}", url); urlConnection = SocketAccess.doPrivilegedIOException(url::openConnection); urlConnection.setConnectTimeout(2000); - } catch (IOException e) { + } catch (final IOException e) { // should not happen, we know the url is not malformed, and openConnection does not 
actually hit network throw new UncheckedIOException(e); } @@ -156,13 +156,13 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe try (InputStream in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream); BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { - String metadataResult = urlReader.readLine(); - if (metadataResult == null || metadataResult.length() == 0) { + final String metadataResult = urlReader.readLine(); + if ((metadataResult == null) || (metadataResult.length() == 0)) { throw new IllegalStateException("no ec2 metadata returned from " + url); } else { attrs.put(Node.NODE_ATTRIBUTES.getKey() + "aws_availability_zone", metadataResult); } - } catch (IOException e) { + } catch (final IOException e) { // this is lenient so the plugin does not fail when installed outside of ec2 logger.error("failed to get metadata for [placement/availability-zone]", e); } @@ -172,6 +172,14 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe @Override public void close() throws IOException { - IOUtils.close(ec2Service.get()); + ec2Service.releaseCachedClient(); + } + + @Override + public boolean reinit(Settings settings) { + // secure settings should be readable + final Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings); + ec2Service.updateClientSettings(clientSettings); + return true; } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java similarity index 99% rename from plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java rename to plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java index 34ad449d06e8d..f18375e583295 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java 
+++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java @@ -22,7 +22,9 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonServiceException; import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.ClientConfiguration; import com.amazonaws.ResponseMetadata; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.regions.Region; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest; @@ -517,9 +519,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -public class AmazonEC2Mock implements AmazonEC2 { +public class AmazonEc2Mock implements AmazonEC2 { - private static final Logger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(AmazonEc2Mock.class.getName()); public static final String PREFIX_PRIVATE_IP = "10.0.0."; public static final String PREFIX_PUBLIC_IP = "8.8.8."; @@ -528,9 +530,12 @@ public class AmazonEC2Mock implements AmazonEC2 { public static final String PREFIX_PRIVATE_DNS = "mock-ip-"; public static final String SUFFIX_PRIVATE_DNS = ".ec2.internal"; - List instances = new ArrayList<>(); + final List instances = new ArrayList<>(); + String endpoint; + final AWSCredentialsProvider credentials; + final ClientConfiguration configuration; - public AmazonEC2Mock(int nodes, List> tagsList) { + public AmazonEc2Mock(int nodes, List> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) { if (tagsList != null) { assert tagsList.size() == nodes; } @@ -552,7 +557,8 @@ public AmazonEC2Mock(int nodes, List> tagsList) { instances.add(instance); } - + this.credentials = credentials; + this.configuration = configuration; } @Override @@ -642,7 +648,7 @@ public DescribeInstancesResult describeInstances(DescribeInstancesRequest descri @Override public void setEndpoint(String endpoint) throws 
IllegalArgumentException { - throw new UnsupportedOperationException("Not supported in mock"); + this.endpoint = endpoint; } @Override @@ -2110,7 +2116,6 @@ public DryRunResult dryRun(DryRunSupporte @Override public void shutdown() { - throw new UnsupportedOperationException("Not supported in mock"); } @Override diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index 06693bbff11ac..a13fe47a632ae 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -25,34 +25,32 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.ec2.AwsEc2Service; import org.elasticsearch.discovery.ec2.AwsEc2ServiceImpl; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; public class AwsEc2ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, Settings.EMPTY); + final AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.EMPTY)); assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class)); } public void testAWSCredentialsWithElasticsearchAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final 
MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("discovery.ec2.access_key", "aws_key"); secureSettings.setString("discovery.ec2.secret_key", "aws_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret"); } protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) { - AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, settings).getCredentials(); + final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, Ec2ClientSettings.getClientSettings(settings)) + .getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } @@ -63,10 +61,10 @@ public void testAWSDefaultConfiguration() { } public void testAWSConfigurationWithAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("discovery.ec2.proxy.username", "aws_proxy_username"); secureSettings.setString("discovery.ec2.proxy.password", "aws_proxy_password"); - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("discovery.ec2.protocol", "http") .put("discovery.ec2.proxy.host", "aws_proxy_host") .put("discovery.ec2.proxy.port", 8080) @@ -83,7 +81,8 @@ protected void launchAWSConfigurationTest(Settings settings, String expectedProxyUsername, String expectedProxyPassword, int expectedReadTimeout) { - ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, settings); + final ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, + Ec2ClientSettings.getClientSettings(settings)); 
assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -94,16 +93,4 @@ protected void launchAWSConfigurationTest(Settings settings, assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); } - public void testDefaultEndpoint() { - String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, Settings.EMPTY); - assertThat(endpoint, nullValue()); - } - - public void testSpecificEndpoint() { - Settings settings = Settings.builder() - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "ec2.endpoint") - .build(); - String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings); - assertThat(endpoint, is("ec2.endpoint")); - } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java index e29821efda223..cbe670561e6b7 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java @@ -19,18 +19,19 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.Tag; -import org.elasticsearch.common.component.AbstractLifecycleComponent; + import org.elasticsearch.common.settings.Settings; import java.util.List; -public class AwsEc2ServiceMock extends AbstractLifecycleComponent implements AwsEc2Service { +public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { - private int nodes; - private List> tagsList; - private AmazonEC2 client; + private final int nodes; + private final List> tagsList; public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) { super(settings); @@ -39,26 +40,8 @@ public AwsEc2ServiceMock(Settings settings, int nodes, List> 
tagsList) } @Override - public synchronized AmazonEC2 client() { - if (client == null) { - client = new AmazonEC2Mock(nodes, tagsList); - } - - return client; + AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + return new AmazonEc2Mock(nodes, tagsList, credentials, configuration); } - @Override - protected void doStart() { - - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() { - - } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java index 49fd9de71ecfa..03aa136d7dddc 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java @@ -80,14 +80,14 @@ protected Settings nodeSettings(int nodeOrdinal) { throw new RuntimeException(e); } MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(AwsEc2Service.ACCESS_KEY_SETTING.getKey(), "some_access"); - secureSettings.setString(AwsEc2Service.SECRET_KEY_SETTING.getKey(), "some_secret"); + secureSettings.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "some_access"); + secureSettings.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "some_secret"); return Settings.builder().put(super.nodeSettings(nodeOrdinal)) .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "ec2") .put("path.logs", resolve) .put("transport.tcp.port", 0) .put("node.portsfile", "true") - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" + + .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort()) .setSecureSettings(secureSettings) 
.build(); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java new file mode 100644 index 0000000000000..a92bd243bc9b7 --- /dev/null +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.ec2; + +import com.amazonaws.services.ec2.model.Tag; + +import org.elasticsearch.common.settings.Settings; + +import java.util.List; + +public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin { + + Ec2DiscoveryPluginMock(Settings settings) { + this(settings, 1, null); + } + + public Ec2DiscoveryPluginMock(Settings settings, int nodes, List> tagsList) { + super(settings, new AwsEc2ServiceMock(settings, nodes, tagsList)); + } + +} diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 9bb75c0b09f97..87754cc8f9af6 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -19,12 +19,17 @@ package org.elasticsearch.discovery.ec2; +import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + import org.elasticsearch.discovery.ec2.AwsEc2Service; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin; import org.elasticsearch.node.Node; @@ -33,14 +38,14 @@ public class Ec2DiscoveryPluginTests extends ESTestCase { private Settings getNodeAttributes(Settings settings, String url) { - Settings realSettings = Settings.builder() + final Settings realSettings = Settings.builder() .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), true) .put(settings).build(); return Ec2DiscoveryPlugin.getAvailabilityZoneNodeAttributes(realSettings, url); } private void assertNodeAttributes(Settings settings, String url, String expected) { - 
Settings additional = getNodeAttributes(settings, url); + final Settings additional = getNodeAttributes(settings, url); if (expected == null) { assertTrue(additional.isEmpty()); } else { @@ -49,36 +54,106 @@ private void assertNodeAttributes(Settings settings, String url, String expected } public void testNodeAttributesDisabled() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), false).build(); assertNodeAttributes(settings, "bogus", null); } public void testNodeAttributes() throws Exception { - Path zoneUrl = createTempFile(); + final Path zoneUrl = createTempFile(); Files.write(zoneUrl, Arrays.asList("us-east-1c")); assertNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString(), "us-east-1c"); } public void testNodeAttributesBogusUrl() { - UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> + final UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> getNodeAttributes(Settings.EMPTY, "bogus") ); assertNotNull(e.getCause()); - String msg = e.getCause().getMessage(); + final String msg = e.getCause().getMessage(); assertTrue(msg, msg.contains("no protocol: bogus")); } public void testNodeAttributesEmpty() throws Exception { - Path zoneUrl = createTempFile(); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> + final Path zoneUrl = createTempFile(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> getNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString()) ); assertTrue(e.getMessage(), e.getMessage().contains("no ec2 metadata returned")); } public void testNodeAttributesErrorLenient() throws Exception { - Path dne = createTempDir().resolve("dne"); + final Path dne = createTempDir().resolve("dne"); assertNodeAttributes(Settings.EMPTY, dne.toUri().toURL().toString(), null); } + + public void testDefaultEndpoint() throws IOException { + try (Ec2DiscoveryPluginMock 
plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { + final String endpoint = ((AmazonEc2Mock) plugin.ec2Service.client().client()).endpoint; + assertThat(endpoint, nullValue()); + } + } + + public void testSpecificEndpoint() throws IOException { + final Settings settings = Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2.endpoint").build(); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings)) { + final String endpoint = ((AmazonEc2Mock) plugin.ec2Service.client().client()).endpoint; + assertThat(endpoint, is("ec2.endpoint")); + } + } + + public void testClientSettingsReInit() throws IOException { + final MockSecureSettings mockSecure1 = new MockSecureSettings(); + mockSecure1.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_1"); + mockSecure1.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_1"); + mockSecure1.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1"); + mockSecure1.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1"); + final Settings settings1 = Settings.builder() + .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1") + .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1") + .setSecureSettings(mockSecure1) + .build(); + final MockSecureSettings mockSecure2 = new MockSecureSettings(); + mockSecure2.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_2"); + mockSecure2.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_2"); + mockSecure2.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2"); + mockSecure2.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2"); + final Settings settings2 = Settings.builder() + .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2") + .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882) + 
.put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2") + .setSecureSettings(mockSecure2) + .build(); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings1)) { + try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + // reload secure settings2 + plugin.reinit(settings2); + // client is not released, it is still using the old settings + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + } + try (AmazonEc2Reference clientReference = 
plugin.ec2Service.client()) { + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2")); + assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); + assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); + assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); + } + } + } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index e7986cb878e41..f31295678d8f9 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -39,6 +39,7 @@ import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; @@ -91,11 +92,15 @@ protected List buildDynamicNodes(Settings nodeSettings, int nodes } protected List buildDynamicNodes(Settings nodeSettings, int nodes, List> tagsList) { - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(nodeSettings, nodes, tagsList); - AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, awsEc2Service); - List discoveryNodes = provider.buildDynamicNodes(); - logger.debug("--> nodes found: {}", discoveryNodes); - return discoveryNodes; + try (Ec2DiscoveryPluginMock plugin = 
new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) { + AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service); + List discoveryNodes = provider.buildDynamicNodes(); + logger.debug("--> nodes found: {}", discoveryNodes); + return discoveryNodes; + } catch (IOException e) { + fail("Unexpected IOException"); + return null; + } } public void testDefaultSettings() throws InterruptedException { @@ -109,7 +114,7 @@ public void testDefaultSettings() throws InterruptedException { public void testPrivateIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEc2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip") @@ -120,7 +125,7 @@ public void testPrivateIp() throws InterruptedException { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) { TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++); + TransportAddress expected = poorMansDNS.get(AmazonEc2Mock.PREFIX_PRIVATE_IP + node++); assertEquals(address, expected); } } @@ -128,7 +133,7 @@ public void testPrivateIp() throws InterruptedException { public void testPublicIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEc2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip") @@ -139,7 +144,7 @@ public void testPublicIp() throws InterruptedException { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) 
{ TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++); + TransportAddress expected = poorMansDNS.get(AmazonEc2Mock.PREFIX_PUBLIC_IP + node++); assertEquals(address, expected); } } @@ -148,8 +153,8 @@ public void testPrivateDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { String instanceId = "node" + (i+1); - poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + - AmazonEC2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEc2Mock.PREFIX_PRIVATE_DNS + instanceId + + AmazonEc2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns") @@ -162,7 +167,7 @@ public void testPrivateDns() throws InterruptedException { String instanceId = "node" + node++; TransportAddress address = discoveryNode.getAddress(); TransportAddress expected = poorMansDNS.get( - AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEC2Mock.SUFFIX_PRIVATE_DNS); + AmazonEc2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEc2Mock.SUFFIX_PRIVATE_DNS); assertEquals(address, expected); } } @@ -171,8 +176,8 @@ public void testPublicDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { String instanceId = "node" + (i+1); - poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId - + AmazonEC2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEc2Mock.PREFIX_PUBLIC_DNS + instanceId + + AmazonEc2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns") @@ -185,7 +190,7 @@ public void testPublicDns() throws InterruptedException { String instanceId = "node" + node++; TransportAddress address = discoveryNode.getAddress(); TransportAddress expected = 
poorMansDNS.get( - AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEC2Mock.SUFFIX_PUBLIC_DNS); + AmazonEc2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEc2Mock.SUFFIX_PUBLIC_DNS); assertEquals(address, expected); } } @@ -315,22 +320,23 @@ protected List fetchDynamicNodes() { public void testGetNodeListCached() throws Exception { Settings.Builder builder = Settings.builder() .put(AwsEc2Service.NODE_CACHE_TIME_SETTING.getKey(), "500ms"); - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); - DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, awsEc2Service) { - @Override - protected List fetchDynamicNodes() { - fetchCount++; - return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { + DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, plugin.ec2Service) { + @Override + protected List fetchDynamicNodes() { + fetchCount++; + return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1); + } + }; + for (int i=0; i<3; i++) { + provider.buildDynamicNodes(); } - }; - for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); - } - assertThat(provider.fetchCount, is(1)); - Thread.sleep(1_000L); // wait for cache to expire - for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); + assertThat(provider.fetchCount, is(1)); + Thread.sleep(1_000L); // wait for cache to expire + for (int i=0; i<3; i++) { + provider.buildDynamicNodes(); + } + assertThat(provider.fetchCount, is(2)); } - assertThat(provider.fetchCount, is(2)); } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index e4d0d98a4e546..ef6088fe154bf 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -39,7 +39,7 @@ /** * A container for settings used to create an S3 client. */ -class S3ClientSettings { +final class S3ClientSettings { // prefix for s3 client settings private static final String PREFIX = "s3.client."; From 057e388ec3647772e2187832cc9dc2a2931fe986 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Fri, 6 Apr 2018 10:00:33 +0300 Subject: [PATCH 06/21] Update secure settings for the repository azure repository plugin (#29319) --- .../repositories/azure/AzureBlobStore.java | 72 ++-- .../repositories/azure/AzureRepository.java | 52 ++- .../azure/AzureRepositoryPlugin.java | 24 +- .../azure/AzureServiceDisableException.java | 30 -- .../azure/AzureServiceRemoteException.java | 30 -- .../azure/AzureStorageService.java | 51 ++- .../azure/AzureStorageServiceImpl.java | 328 ++++++++---------- .../azure/AzureStorageSettings.java | 78 ++++- .../azure/AzureRepositorySettingsTests.java | 3 +- .../azure/AzureSnapshotRestoreTests.java | 56 ++- .../azure/AzureStorageServiceMock.java | 40 ++- .../azure/AzureStorageServiceTests.java | 193 +++++++---- 12 files changed, 506 insertions(+), 451 deletions(-) delete mode 100644 plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java delete mode 100644 plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 7e8987ae94576..56ae5b6af31ba 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -20,45 +20,43 @@ package org.elasticsearch.repositories.azure; 
import com.microsoft.azure.storage.LocationMode; + import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; - import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; -import java.util.Locale; import java.util.Map; +import static java.util.Collections.emptyMap; + import static org.elasticsearch.repositories.azure.AzureRepository.Repository; public class AzureBlobStore extends AbstractComponent implements BlobStore { - private final AzureStorageService client; + private final AzureStorageService service; private final String clientName; - private final LocationMode locMode; private final String container; + private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, Settings settings, - AzureStorageService client) throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService service) + throws URISyntaxException, StorageException { super(settings); - this.client = client; this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); - - String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - if (Strings.hasLength(modeStr)) { - this.locMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); - } else { - this.locMode = LocationMode.PRIMARY_ONLY; - } + this.service = service; + // locationMode is set per repository, not per client + this.locationMode = 
Repository.LOCATION_MODE_SETTING.get(metadata.settings()); + final Map prevSettings = this.service.updateClientsSettings(emptyMap()); + final Map newSettings = AzureStorageSettings.overrideLocationMode(prevSettings, this.locationMode); + this.service.updateClientsSettings(newSettings); } @Override @@ -70,7 +68,11 @@ public String toString() { * Gets the configured {@link LocationMode} for the Azure storage requests. */ public LocationMode getLocationMode() { - return locMode; + return locationMode; + } + + public String getClientName() { + return clientName; } @Override @@ -79,12 +81,13 @@ public BlobContainer blobContainer(BlobPath path) { } @Override - public void delete(BlobPath path) { - String keyPath = path.buildAsString(); + public void delete(BlobPath path) throws IOException { + final String keyPath = path.buildAsString(); try { - this.client.deleteFiles(this.clientName, this.locMode, container, keyPath); + service.deleteFiles(clientName, container, keyPath); } catch (URISyntaxException | StorageException e) { - logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + logger.warn("cannot access [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + throw new IOException(e); } } @@ -92,37 +95,32 @@ public void delete(BlobPath path) { public void close() { } - public boolean doesContainerExist() - { - return this.client.doesContainerExist(this.clientName, this.locMode, container); + public boolean containerExist() throws URISyntaxException, StorageException { + return service.doesContainerExist(clientName, container); } - public boolean blobExists(String blob) throws URISyntaxException, StorageException - { - return this.client.blobExists(this.clientName, this.locMode, container, blob); + public boolean blobExists(String blob) throws URISyntaxException, StorageException { + return service.blobExists(clientName, container, blob); } - public void deleteBlob(String blob) throws URISyntaxException, 
StorageException - { - this.client.deleteBlob(this.clientName, this.locMode, container, blob); + public void deleteBlob(String blob) throws URISyntaxException, StorageException { + service.deleteBlob(clientName, container, blob); } - public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException - { - return this.client.getInputStream(this.clientName, this.locMode, container, blob); + public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException { + return service.getInputStream(clientName, container, blob); } public Map listBlobsByPrefix(String keyPath, String prefix) throws URISyntaxException, StorageException { - return this.client.listBlobsByPrefix(this.clientName, this.locMode, container, keyPath, prefix); + return service.listBlobsByPrefix(clientName, container, keyPath, prefix); } - public void moveBlob(String sourceBlob, String targetBlob) throws URISyntaxException, StorageException - { - this.client.moveBlob(this.clientName, this.locMode, container, sourceBlob, targetBlob); + public void moveBlob(String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { + service.moveBlob(clientName, container, sourceBlob, targetBlob); } public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { - this.client.writeBlob(this.clientName, this.locMode, container, blobName, inputStream, blobSize); + service.writeBlob(clientName, container, blobName, inputStream, blobSize); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 06bf10fb2e292..47b398a4c2fd3 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ 
b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,6 +21,8 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; + +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -33,6 +35,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotCreationException; import org.elasticsearch.snapshots.SnapshotId; import java.io.IOException; @@ -60,19 +63,19 @@ public class AzureRepository extends BlobStoreRepository { public static final String TYPE = "azure"; public static final class Repository { - @Deprecated // Replaced by client public static final Setting ACCOUNT_SETTING = new Setting<>("account", "default", Function.identity(), Property.NodeScope, Property.Deprecated); public static final Setting CLIENT_NAME = new Setting<>("client", ACCOUNT_SETTING, Function.identity()); - public static final Setting CONTAINER_SETTING = new Setting<>("container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope); public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = new Setting<>("location_mode", + s -> LocationMode.PRIMARY_ONLY.toString(), s -> LocationMode.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope); public static final Setting COMPRESS_SETTING = 
Setting.boolSetting("compress", false, Property.NodeScope); + public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } private final AzureBlobStore blobStore; @@ -81,45 +84,32 @@ public static final class Repository { private final boolean compress; private final boolean readonly; - public AzureRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) - throws IOException, URISyntaxException, StorageException { + public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, + AzureStorageService storageService) throws IOException, URISyntaxException, StorageException { super(metadata, environment.settings(), namedXContentRegistry); - - blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); - String container = Repository.CONTAINER_SETTING.get(metadata.settings()); + this.blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); - String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - Boolean forcedReadonly = metadata.settings().getAsBoolean("readonly", null); // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
// For secondary_only setting, the repository should be read only - if (forcedReadonly == null) { - if (Strings.hasLength(modeStr)) { - LocationMode locationMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); - this.readonly = locationMode == LocationMode.SECONDARY_ONLY; - } else { - this.readonly = false; - } + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); } else { - readonly = forcedReadonly; + this.readonly = this.blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY; } - - String basePath = Repository.BASE_PATH_SETTING.get(metadata.settings()); - + final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); if (Strings.hasLength(basePath)) { // Remove starting / if any - basePath = Strings.trimLeadingCharacter(basePath, '/'); BlobPath path = new BlobPath(); - for(String elem : basePath.split("/")) { + for(final String elem : basePath.split("/")) { path = path.add(elem); } this.basePath = path; } else { this.basePath = BlobPath.cleanPath(); } - logger.debug("using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - container, chunkSize, compress, basePath); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath)); } /** @@ -153,9 +143,13 @@ protected ByteSizeValue chunkSize() { @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { - if (blobStore.doesContainerExist() == false) { - throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before " + - " creating an azure snapshot repository backed by it."); + try { + if (blobStore.containerExist() == false) { + throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. 
Please create it before " + + " creating an azure snapshot repository backed by it."); + } + } catch (URISyntaxException | StorageException e) { + throw new SnapshotCreationException(metadata.name(), snapshotId, e); } super.initializeSnapshot(snapshotId, indices, clusterMetadata); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index c0126cb8df065..eb92fd198c570 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -24,9 +24,9 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReInitializablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; - import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -35,24 +35,20 @@ /** * A plugin to add a repository type that writes to and from the Azure cloud storage service. 
*/ -public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin { - - private final Map clientsSettings; +public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { - // overridable for tests - protected AzureStorageService createStorageService(Settings settings) { - return new AzureStorageServiceImpl(settings, clientsSettings); - } + // protected for testing + final AzureStorageService azureStoreService; public AzureRepositoryPlugin(Settings settings) { // eagerly load client settings so that secure settings are read - clientsSettings = AzureStorageSettings.load(settings); + this.azureStoreService = new AzureStorageServiceImpl(settings); } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, createStorageService(env.settings()))); + (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, azureStoreService)); } @Override @@ -67,4 +63,12 @@ public List> getSettings() { AzureStorageSettings.PROXY_PORT_SETTING ); } + + @Override + public boolean reinit(Settings settings) { + // secure settings should be readable + final Map clientsSettings = AzureStorageSettings.load(settings); + azureStoreService.updateClientsSettings(clientsSettings); + return true; + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java deleted file mode 100644 index a100079668b54..0000000000000 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.azure; - -public class AzureServiceDisableException extends IllegalStateException { - public AzureServiceDisableException(String msg) { - super(msg); - } - - public AzureServiceDisableException(String msg, Throwable cause) { - super(msg, cause); - } -} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java deleted file mode 100644 index 3f20e29505751..0000000000000 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.azure; - -public class AzureServiceRemoteException extends IllegalStateException { - public AzureServiceRemoteException(String msg) { - super(msg); - } - - public AzureServiceRemoteException(String msg, Throwable cause) { - super(msg, cause); - } -} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 3337c07e6eece..dd832bcb80de1 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -19,9 +19,12 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobClient; + import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -29,6 +32,7 @@ import java.io.InputStream; import java.net.URISyntaxException; import java.util.Map; +import java.util.function.Supplier; /** * Azure Storage Service interface @@ -36,32 +40,49 @@ */ public interface AzureStorageService { + /** + * Creates a {@code CloudBlobClient} on each invocation using 
the current client + * settings. CloudBlobClient is not thread safe and the settings can change, + * therefore the instance is not cache-able and should only be reused inside a + * thread for logically coupled ops. The {@code OperationContext} is used to + * specify the proxy, but a new context is *required* for each call. + */ + Tuple> client(String clientName); + + /** + * Updates settings for building clients. Future client requests will use the + * new settings. + * + * @param clientsSettings + * the new settings + * @return the old settings + */ + Map updateClientsSettings(Map clientsSettings); + ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); - boolean doesContainerExist(String account, LocationMode mode, String container); + boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException; - void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; + void removeContainer(String account, String container) throws URISyntaxException, StorageException; - void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; + void createContainer(String account, String container) throws URISyntaxException, StorageException; - void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException; + void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException; - boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + boolean blobExists(String account, String container, String blob) throws URISyntaxException, StorageException; - void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, 
StorageException; + void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException; - InputStream getInputStream(String account, LocationMode mode, String container, String blob) - throws URISyntaxException, StorageException, IOException; + InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException; - Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) - throws URISyntaxException, StorageException; + Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException; - void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) - throws URISyntaxException, StorageException; + void moveBlob(String account, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException; - void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws - URISyntaxException, StorageException; + void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) + throws URISyntaxException, StorageException; static InputStream giveSocketPermissionsToStream(InputStream stream) { return new InputStream() { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index f21dbdfd269f4..b38656f5409f9 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.azure; import 
com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; @@ -34,167 +33,131 @@ import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.repositories.RepositoryException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; +import java.security.InvalidKeyException; import java.util.EnumSet; -import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; -public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { +import static java.util.Collections.emptyMap; - final Map storageSettings; +public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { - final Map clients = new HashMap<>(); + // 'package' for testing + volatile Map storageSettings = emptyMap(); - public AzureStorageServiceImpl(Settings settings, Map storageSettings) { + public AzureStorageServiceImpl(Settings settings) { super(settings); - - this.storageSettings = storageSettings; - - if (storageSettings.isEmpty()) { - // If someone did not register any settings, they basically can't use the plugin - throw new IllegalArgumentException("If you want to use an azure repository, you need to define a 
client configuration."); - } - - logger.debug("starting azure storage client instance"); - - // We register all regular azure clients - for (Map.Entry azureStorageSettingsEntry : this.storageSettings.entrySet()) { - logger.debug("registering regular client for account [{}]", azureStorageSettingsEntry.getKey()); - createClient(azureStorageSettingsEntry.getValue()); - } - } - - void createClient(AzureStorageSettings azureStorageSettings) { - try { - logger.trace("creating new Azure storage client using account [{}], key [{}], endpoint suffix [{}]", - azureStorageSettings.getAccount(), azureStorageSettings.getKey(), azureStorageSettings.getEndpointSuffix()); - - String storageConnectionString = - "DefaultEndpointsProtocol=https;" - + "AccountName=" + azureStorageSettings.getAccount() + ";" - + "AccountKey=" + azureStorageSettings.getKey(); - - String endpointSuffix = azureStorageSettings.getEndpointSuffix(); - if (endpointSuffix != null && !endpointSuffix.isEmpty()) { - storageConnectionString += ";EndpointSuffix=" + endpointSuffix; - } - // Retrieve storage account from connection-string. - CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); - - // Create the blob client. 
- CloudBlobClient client = storageAccount.createCloudBlobClient(); - - // Register the client - this.clients.put(azureStorageSettings.getAccount(), client); - } catch (Exception e) { - logger.error("can not create azure storage client: {}", e.getMessage()); - } + // eagerly load client settings so that secure settings are read + final Map clientsSettings = AzureStorageSettings.load(settings); + updateClientsSettings(clientsSettings); } - CloudBlobClient getSelectedClient(String clientName, LocationMode mode) { - logger.trace("selecting a client named [{}], mode [{}]", clientName, mode.name()); - AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); + @Override + public Tuple> client(String clientName) { + final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { - throw new IllegalArgumentException("Can not find named azure client [" + clientName + "]. Check your settings."); + throw new SettingsException("Cannot find an azure client by the name [" + clientName + "]. 
Check your settings."); } - - CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount()); - - if (client == null) { - throw new IllegalArgumentException("Can not find an azure client named [" + azureStorageSettings.getAccount() + "]"); + try { + return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings)); + } catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) { + throw new SettingsException("Invalid azure client [" + clientName + "] settings.", e); } + } - // NOTE: for now, just set the location mode in case it is different; - // only one mode per storage clientName can be active at a time - client.getDefaultRequestOptions().setLocationMode(mode); - - // Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default) - if (azureStorageSettings.getTimeout().getSeconds() > 0) { - try { - int timeout = (int) azureStorageSettings.getTimeout().getMillis(); - client.getDefaultRequestOptions().setTimeoutIntervalInMs(timeout); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Can not convert [" + azureStorageSettings.getTimeout() + - "]. 
It can not be longer than 2,147,483,647ms."); + protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + final CloudBlobClient client = createClient(azureStorageSettings); + // Set timeout option if the user sets cloud.azure.storage.timeout or + // cloud.azure.storage.xxx.timeout (it's negative by default) + final long timeout = azureStorageSettings.getTimeout().getMillis(); + if (timeout > 0) { + if (timeout > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms."); } + client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout); } - // We define a default exponential retry policy - client.getDefaultRequestOptions().setRetryPolicyFactory( - new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); - + client.getDefaultRequestOptions() + .setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); + client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode()); return client; } - private OperationContext generateOperationContext(String clientName) { - OperationContext context = new OperationContext(); - AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); - - if (azureStorageSettings.getProxy() != null) { - context.setProxy(azureStorageSettings.getProxy()); - } + protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + final String connectionString = azureStorageSettings.buildConnectionString(); + return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); + } + protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + final OperationContext context = new OperationContext(); + 
context.setProxy(azureStorageSettings.getProxy()); return context; } @Override - public boolean doesContainerExist(String account, LocationMode mode, String container) { - try { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account))); - } catch (Exception e) { - logger.error("can not access container [{}]", container); - } - return false; + public Map updateClientsSettings(Map clientsSettings) { + final Map prevSettings = this.storageSettings; + this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + // clients are built lazily by {@link client(String)} + return prevSettings; } @Override - public void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - logger.trace("removing container [{}]", container); - SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, generateOperationContext(account))); + public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException { + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } @Override - public void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException { + public void removeContainer(String account, String container) throws URISyntaxException, StorageException { + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = 
client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("removing container [{}]", container)); + SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, client.v2().get())); + } + + @Override + public void createContainer(String account, String container) throws URISyntaxException, StorageException { try { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - logger.trace("creating container [{}]", container); - SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, generateOperationContext(account))); - } catch (IllegalArgumentException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("fails creating container [{}]", container), e); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("creating container [{}]", container)); + SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, client.v2().get())); + } catch (final IllegalArgumentException e) { + logger.trace(() -> new ParameterizedMessage("failed creating container [{}]", container), e); throw new RepositoryException(container, e.getMessage(), e); } } @Override - public void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException { - logger.trace("delete files container [{}], path [{}]", container, path); - - // Container name must be lower case. - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); + public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { + final Tuple> client = client(account); + // container name must be lower case. 
+ final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - // We list the blobs using a flat blob listing mode - for (ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - generateOperationContext(account))) { - String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri()); - deleteBlob(account, mode, container, blobName); + // list the blobs using a flat blob listing mode + for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, + client.v2().get())) { + final String blobName = blobNameFromUri(blobItem.getUri()); + logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); + // don't call {@code #deleteBlob}, use the same client + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); + azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); } } }); @@ -206,85 +169,82 @@ public void deleteFiles(String account, LocationMode mode, String container, Str * @param uri URI to parse * @return The blob name relative to the container */ - public static String blobNameFromUri(URI uri) { - String path = uri.getPath(); - + static String blobNameFromUri(URI uri) { + final String path = uri.getPath(); // We remove the container name from the path // The 3 magic number cames from the fact if path is /container/path/to/myfile // First occurrence is empty "/" // Second occurrence is "container // Last part contains "path/to/myfile" which is what we want to get - String[] splits = path.split("/", 3); - + final String[] splits = path.split("/", 3); // We return the remaining end 
of the string return splits[2]; } @Override - public boolean blobExists(String account, LocationMode mode, String container, String blob) - throws URISyntaxException, StorageException { + public boolean blobExists(String account, String container, String blob) + throws URISyntaxException, StorageException { // Container name must be lower case. - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { - CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(() -> azureBlob.exists(null, null, generateOperationContext(account))); - } - - return false; + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + return SocketAccess.doPrivilegedException(() -> { + if (blobContainer.exists(null, null, client.v2().get())) { + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); + return azureBlob.exists(null, null, client.v2().get()); + } + return false; + }); } @Override - public void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { - logger.trace("delete blob for container [{}], blob [{}]", container, blob); - + public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + final Tuple> client = client(account); // Container name must be lower case. - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { - logger.trace("container [{}]: blob [{}] found. 
removing.", container, blob); - CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - SocketAccess.doPrivilegedVoidException(() -> azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, - generateOperationContext(account))); - } + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob)); + SocketAccess.doPrivilegedVoidException(() -> { + if (blobContainer.exists(null, null, client.v2().get())) { + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); + logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. removing.", container, blob)); + azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); + } + }); } @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, + public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException { - logger.trace("reading container [{}], blob [{}]", container, blob); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); - BlobInputStream is = SocketAccess.doPrivilegedException(() -> - blockBlobReference.openInputStream(null, null, generateOperationContext(account))); + final Tuple> client = client(account); + final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob); + logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob)); + final BlobInputStream is = SocketAccess.doPrivilegedException(() -> + blockBlobReference.openInputStream(null, null, client.v2().get())); return AzureStorageService.giveSocketPermissionsToStream(is); } @Override - public Map 
listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) + public Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) throws URISyntaxException, StorageException { // NOTE: this should be here: if (prefix == null) prefix = ""; // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! - - logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); - MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); + final MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix)); SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false, - enumBlobListingDetails, null, generateOperationContext(account))) { - URI uri = blobItem.getUri(); - logger.trace("blob url [{}]", uri); - + for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? 
"" : prefix), false, + enumBlobListingDetails, null, client.v2().get())) { + final URI uri = blobItem.getUri(); + logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / - String blobPath = uri.getPath().substring(1 + container.length() + 1); - BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); - String name = blobPath.substring(keyPath.length()); - logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); + final String blobPath = uri.getPath().substring(1 + container.length() + 1); + final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); + final String name = blobPath.substring(keyPath.length()); + logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } @@ -293,31 +253,33 @@ enumBlobListingDetails, null, generateOperationContext(account))) { } @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + public void moveBlob(String account, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { - logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, targetBlob); - - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - CloudBlockBlob blobSource = blobContainer.getBlockBlobReference(sourceBlob); - if (SocketAccess.doPrivilegedException(() -> blobSource.exists(null, null, generateOperationContext(account)))) { - CloudBlockBlob blobTarget = blobContainer.getBlockBlobReference(targetBlob); - 
SocketAccess.doPrivilegedVoidException(() -> { - blobTarget.startCopy(blobSource, null, null, null, generateOperationContext(account)); - blobSource.delete(DeleteSnapshotsOption.NONE, null, null, generateOperationContext(account)); - }); - logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob); - } + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final CloudBlockBlob blobSource = blobContainer.getBlockBlobReference(sourceBlob); + logger.trace(() -> new ParameterizedMessage("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, + targetBlob)); + SocketAccess.doPrivilegedVoidException(() -> { + if (blobSource.exists(null, null, client.v2().get())) { + final CloudBlockBlob blobTarget = blobContainer.getBlockBlobReference(targetBlob); + blobTarget.startCopy(blobSource, null, null, null, client.v2().get()); + blobSource.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); + logger.trace(() -> new ParameterizedMessage("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, + sourceBlob, targetBlob)); + } + }); } @Override - public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { - logger.trace("writeBlob({}, stream, {})", blobName, blobSize); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); - SocketAccess.doPrivilegedVoidException(() -> blob.upload(inputStream, blobSize, null, null, generateOperationContext(account))); - logger.trace("writeBlob({}, stream, {}) - 
done", blobName, blobSize); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); + SocketAccess.doPrivilegedVoidException(() -> blob.upload(inputStream, blobSize, null, null, client.v2().get())); + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize)); } + } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index e360558933cc1..6423fc1ce3c17 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -19,8 +19,10 @@ package org.elasticsearch.repositories.azure; +import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.RetryPolicy; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -29,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -39,7 +40,7 @@ import java.util.Locale; import java.util.Map; -public final class AzureStorageSettings { +final class AzureStorageSettings { // prefix for azure client settings private static final String AZURE_CLIENT_PREFIX_KEY = "azure.client."; @@ -86,22 +87,33 @@ public 
final class AzureStorageSettings { private final TimeValue timeout; private final int maxRetries; private final Proxy proxy; + private final LocationMode locationMode; + // copy-constructor + private AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy proxy, + LocationMode locationMode) { + this.account = account; + this.key = key; + this.endpointSuffix = endpointSuffix; + this.timeout = timeout; + this.maxRetries = maxRetries; + this.proxy = proxy; + this.locationMode = locationMode; + } - public AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, + AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost, Integer proxyPort) { this.account = account; this.key = key; this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; - // Register the proxy if we have any // Validate proxy settings - if (proxyType.equals(Proxy.Type.DIRECT) && (proxyPort != 0 || Strings.hasText(proxyHost))) { + if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined."); } - if (proxyType.equals(Proxy.Type.DIRECT) == false && (proxyPort == 0 || Strings.isEmpty(proxyHost))) { + if ((proxyType.equals(Proxy.Type.DIRECT) == false) && ((proxyPort == 0) || Strings.isEmpty(proxyHost))) { throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined."); } @@ -110,10 +122,11 @@ public AzureStorageSettings(String account, String key, String endpointSuffix, T } else { try { proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort)); - } catch (UnknownHostException e) { + } catch (final UnknownHostException e) { throw new SettingsException("Azure proxy host is 
unknown.", e); } } + this.locationMode = LocationMode.PRIMARY_ONLY; } public String getKey() { @@ -140,15 +153,33 @@ public Proxy getProxy() { return proxy; } + public String buildConnectionString() { + final StringBuilder connectionStringBuilder = new StringBuilder(); + connectionStringBuilder.append("DefaultEndpointsProtocol=https") + .append(";AccountName=") + .append(account) + .append(";AccountKey=") + .append(key); + if (Strings.hasText(endpointSuffix)) { + connectionStringBuilder.append(";EndpointSuffix=").append(endpointSuffix); + } + return connectionStringBuilder.toString(); + } + + public LocationMode getLocationMode() { + return locationMode; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); - sb.append(", account='").append(account).append('\''); + sb.append("account='").append(account).append('\''); sb.append(", key='").append(key).append('\''); sb.append(", timeout=").append(timeout); sb.append(", endpointSuffix='").append(endpointSuffix).append('\''); sb.append(", maxRetries=").append(maxRetries); sb.append(", proxy=").append(proxy); + sb.append(", locationMode='").append(locationMode).append('\''); sb.append('}'); return sb.toString(); } @@ -160,17 +191,20 @@ public String toString() { */ public static Map load(Settings settings) { // Get the list of existing named configurations - Map storageSettings = new HashMap<>(); - for (String clientName : ACCOUNT_SETTING.getNamespaces(settings)) { + final Map storageSettings = new HashMap<>(); + for (final String clientName : ACCOUNT_SETTING.getNamespaces(settings)) { storageSettings.put(clientName, getClientSettings(settings, clientName)); } - - if (storageSettings.containsKey("default") == false && storageSettings.isEmpty() == false) { + if (storageSettings.isEmpty()) { + throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); + } + if (storageSettings.containsKey("default") == false) { 
// in case no setting named "default" has been set, let's define our "default" // as the first named config we get - AzureStorageSettings defaultSettings = storageSettings.values().iterator().next(); + final AzureStorageSettings defaultSettings = storageSettings.values().iterator().next(); storageSettings.put("default", defaultSettings); } + assert storageSettings.containsKey("default") : "always have 'default'"; return Collections.unmodifiableMap(storageSettings); } @@ -191,13 +225,25 @@ static AzureStorageSettings getClientSettings(Settings settings, String clientNa private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } public static T getValue(Settings settings, String groupName, Setting setting) { - Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); - String fullKey = k.toConcreteKey(groupName).toString(); + final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); + final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); } + + static Map overrideLocationMode(Map clientsSettings, + LocationMode locationMode) { + final MapBuilder mapBuilder = new MapBuilder<>(); + for (final Map.Entry entry : clientsSettings.entrySet()) { + final AzureStorageSettings azureSettings = new AzureStorageSettings(entry.getValue().account, entry.getValue().key, + entry.getValue().endpointSuffix, entry.getValue().timeout, entry.getValue().maxRetries, entry.getValue().proxy, + locationMode); + mapBuilder.put(entry.getKey(), azureSettings); + } + return mapBuilder.immutableMap(); + } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 01b26bad343d5..304495ef35444 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -34,6 +34,7 @@ import java.net.URISyntaxException; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class AzureRepositorySettingsTests extends ESTestCase { @@ -44,7 +45,7 @@ private AzureRepository azureRepository(Settings settings) throws StorageExcepti .put(settings) .build(); return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), - TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null); + TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class)); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 439a9d567f1a4..10163bb2f31df 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -19,9 +19,7 @@ package org.elasticsearch.repositories.azure; - import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -77,9 +75,9 @@ private static Settings.Builder generateMockSettings() { return 
Settings.builder().setSecureSettings(generateMockSecureSettings()); } + @SuppressWarnings("resource") private static AzureStorageService getAzureStorageService() { - return new AzureStorageServiceImpl(generateMockSettings().build(), - AzureStorageSettings.load(generateMockSettings().build())); + return new AzureRepositoryPlugin(generateMockSettings().build()).azureStoreService; } @Override @@ -94,7 +92,7 @@ private static String getContainerName() { * there mustn't be a hyphen between the 2 concatenated numbers * (can't have 2 consecutives hyphens on Azure containers) */ - String testName = "snapshot-itest-" + final String testName = "snapshot-itest-" .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } @@ -123,7 +121,7 @@ private static void createTestContainer(String containerName) throws Exception { // It could happen that we run this test really close to a previous one // so we might need some time to be able to create the container assertBusy(() -> { - getAzureStorageService().createContainer("default", LocationMode.PRIMARY_ONLY, containerName); + getAzureStorageService().createContainer("default", containerName); }, 30, TimeUnit.SECONDS); } @@ -132,7 +130,7 @@ private static void createTestContainer(String containerName) throws Exception { * @param containerName container name to use */ private static void removeTestContainer(String containerName) throws URISyntaxException, StorageException { - getAzureStorageService().removeContainer("default", LocationMode.PRIMARY_ONLY, containerName); + getAzureStorageService().removeContainer("default", containerName); } @Override @@ -141,7 +139,7 @@ protected Collection> nodePlugins() { } private String getRepositoryPath() { - String testName = "it-" + getTestName(); + final String testName = "it-" + getTestName(); return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; } @@ -159,21 +157,21 @@ public Settings indexSettings() { public final void wipeAzureRepositories() { try { client().admin().cluster().prepareDeleteRepository("*").get(); - } catch (RepositoryMissingException ignored) { + } catch (final RepositoryMissingException ignored) { } } public void testMultipleRepositories() { - Client client = client(); + final Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") + final PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-1")) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); - PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") + final PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-2")) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -194,14 +192,14 @@ public void testMultipleRepositories() { assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); logger.info("--> snapshot 1"); - CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap") + final CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap") 
.setWaitForCompletion(true).setIndices("test-idx-1").get(); assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards())); logger.info("--> snapshot 2"); - CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap") + final CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-2").get(); assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), @@ -216,7 +214,7 @@ public void testMultipleRepositories() { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion from snapshot 1"); - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap") + final RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-1").get(); assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -226,7 +224,7 @@ public void testMultipleRepositories() { assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); logger.info("--> restore other index after deletion from snapshot 2"); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap") + final RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-2").get(); 
assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -252,7 +250,7 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { } refresh(); - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() @@ -300,9 +298,9 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { */ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { final String repositoryName="test-repo-28"; - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") + final PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); @@ -311,14 +309,14 @@ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISy try { client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch (final SnapshotMissingException ex) { // Expected } try { client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch (final SnapshotMissingException ex) { // Expected } } @@ -328,9 +326,9 @@ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISy */ public void testNonExistingRepo_23() { final String 
repositoryName = "test-repo-test23"; - Client client = client(); + final Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) + final PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -342,7 +340,7 @@ public void testNonExistingRepo_23() { try { client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get(); fail("Shouldn't be here"); - } catch (SnapshotRestoreException ex) { + } catch (final SnapshotRestoreException ex) { // Expected } } @@ -356,7 +354,7 @@ public void testRemoveAndCreateContainer() throws Exception { createTestContainer(container); removeTestContainer(container); - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository while container is being removed"); try { client.preparePutRepository("test-repo").setType("azure") @@ -364,7 +362,7 @@ public void testRemoveAndCreateContainer() throws Exception { .put(Repository.CONTAINER_SETTING.getKey(), container) ).get(); fail("we should get a RepositoryVerificationException"); - } catch (RepositoryVerificationException e) { + } catch (final RepositoryVerificationException e) { // Fine we expect that } } @@ -378,9 +376,9 @@ public void testRemoveAndCreateContainer() throws Exception { * @throws Exception If anything goes wrong */ public void testGeoRedundantStorage() throws Exception { - Client client = client(); + final Client client = client(); logger.info("--> creating azure primary repository"); - PutRepositoryResponse 
putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary") + final PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); @@ -394,7 +392,7 @@ public void testGeoRedundantStorage() throws Exception { assertThat(endWait - startWait, lessThanOrEqualTo(30000L)); logger.info("--> creating azure secondary repository"); - PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary") + final PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.LOCATION_MODE_SETTING.getKey(), "secondary_only") diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 68b84594d62ca..d75016464f14e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -19,11 +19,14 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobClient; + import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; 
import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -39,6 +42,9 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +import static java.util.Collections.emptyMap; /** * In memory storage for unit tests @@ -52,42 +58,42 @@ public AzureStorageServiceMock() { } @Override - public boolean doesContainerExist(String account, LocationMode mode, String container) { + public boolean doesContainerExist(String account, String container) { return true; } @Override - public void removeContainer(String account, LocationMode mode, String container) { + public void removeContainer(String account, String container) { } @Override - public void createContainer(String account, LocationMode mode, String container) { + public void createContainer(String account, String container) { } @Override - public void deleteFiles(String account, LocationMode mode, String container, String path) { + public void deleteFiles(String account, String container, String path) { } @Override - public boolean blobExists(String account, LocationMode mode, String container, String blob) { + public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, LocationMode mode, String container, String blob) { + public void deleteBlob(String account, String container, String blob) { blobs.remove(blob); } @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws IOException { - if (!blobExists(account, mode, container, blob)) { + public InputStream getInputStream(String account, String container, String blob) throws IOException { + if (!blobExists(account, container, blob)) { throw new NoSuchFileException("missing blob [" + blob + "]"); } return AzureStorageService.giveSocketPermissionsToStream(new 
PermissionRequiringInputStream(blobs.get(blob).toByteArray())); } @Override - public Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) { + public Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) { MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); blobs.forEach((String blobName, ByteArrayOutputStream bos) -> { final String checkBlob; @@ -105,7 +111,7 @@ public Map listBlobsByPrefix(String account, LocationMode } @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + public void moveBlob(String account, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { for (String blobName : blobs.keySet()) { if (endsWithIgnoreCase(blobName, sourceBlob)) { @@ -117,7 +123,7 @@ public void moveBlob(String account, LocationMode mode, String container, String } @Override - public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { blobs.put(blobName, outputStream); @@ -197,4 +203,14 @@ public synchronized int read(byte[] b, int off, int len) { return super.read(b, off, len); } } + + @Override + public Tuple> client(String clientName) { + return null; + } + + @Override + public Map updateClientsSettings(Map clientsSettings) { + return emptyMap(); + } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 72cd015f14847..22b7079c4ad2b 100644 --- 
a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.core.Base64; @@ -29,6 +28,7 @@ import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -50,7 +50,7 @@ public class AzureStorageServiceTests extends ESTestCase { private MockSecureSettings buildSecureSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", "mykey1"); secureSettings.setString("azure.client.azure2.account", "myaccount2"); @@ -60,24 +60,24 @@ private MockSecureSettings buildSecureSettings() { return secureSettings; } private Settings buildSettings() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); return settings; } public void testReadSecuredSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", "mykey1"); secureSettings.setString("azure.client.azure2.account", "myaccount2"); secureSettings.setString("azure.client.azure2.key", "mykey2"); secureSettings.setString("azure.client.azure3.account", 
"myaccount3"); secureSettings.setString("azure.client.azure3.key", "mykey3"); - Settings settings = Settings.builder().setSecureSettings(secureSettings) + final Settings settings = Settings.builder().setSecureSettings(secureSettings) .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build(); - Map loadedSettings = AzureStorageSettings.load(settings); + final Map loadedSettings = AzureStorageSettings.load(settings); assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default")); assertThat(loadedSettings.get("azure1").getEndpointSuffix(), isEmptyString()); @@ -85,95 +85,171 @@ public void testReadSecuredSettings() { assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix")); } - public void testCreateClientWithEndpointSuffix() { - MockSecureSettings secureSettings = new MockSecureSettings(); + public void testCreateClientWithEndpointSuffix() throws IOException { + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", Base64.encode("mykey1".getBytes(StandardCharsets.UTF_8))); secureSettings.setString("azure.client.azure2.account", "myaccount2"); secureSettings.setString("azure.client.azure2.key", Base64.encode("mykey2".getBytes(StandardCharsets.UTF_8))); - Settings settings = Settings.builder().setSecureSettings(secureSettings) + final Settings settings = Settings.builder().setSecureSettings(secureSettings) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); + try (AzureRepositoryPlugin plugin = new 
AzureRepositoryPlugin(settings)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); + assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); + final CloudBlobClient client2 = azureStorageService.client("azure2").v1(); + assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net")); + } + } - CloudBlobClient client2 = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY); - assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net")); + public void testReinitClientSettings() throws IOException { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setString("azure.client.azure1.account", "myaccount11"); + secureSettings1.setString("azure.client.azure1.key", Base64.encode("mykey11".getBytes(StandardCharsets.UTF_8))); + secureSettings1.setString("azure.client.azure2.account", "myaccount12"); + secureSettings1.setString("azure.client.azure2.key", Base64.encode("mykey12".getBytes(StandardCharsets.UTF_8))); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setString("azure.client.azure1.account", "myaccount21"); + secureSettings2.setString("azure.client.azure1.key", Base64.encode("mykey21".getBytes(StandardCharsets.UTF_8))); + secureSettings2.setString("azure.client.azure3.account", "myaccount23"); + secureSettings2.setString("azure.client.azure3.key", Base64.encode("mykey23".getBytes(StandardCharsets.UTF_8))); + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + final AzureStorageServiceImpl azureStorageService = 
(AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); + final CloudBlobClient client12 = azureStorageService.client("azure2").v1(); + assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + // client 3 is missing + final SettingsException e1 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure3")); + assertThat(e1.getMessage(), is("Cannot find an azure client by the name [azure3]. Check your settings.")); + // update client settings + plugin.reinit(settings2); + // old client 1 not changed + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); + // new client 1 is changed + final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); + assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount21.blob.core.windows.net")); + // old client 2 not changed + assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + // new client2 is gone + final SettingsException e2 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure2")); + assertThat(e2.getMessage(), is("Cannot find an azure client by the name [azure2]. 
Check your settings.")); + // client 3 emerged + final CloudBlobClient client23 = azureStorageService.client("azure3").v1(); + assertThat(client23.getEndpoint().toString(), equalTo("https://myaccount23.blob.core.windows.net")); + } } - public void testGetSelectedClientWithNoPrimaryAndSecondary() { - try { - new AzureStorageServiceMockForSettings(Settings.EMPTY); - fail("we should have raised an IllegalArgumentException"); - } catch (IllegalArgumentException e) { + public void testReinitClientEmptySettings() throws IOException { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.azure1.account", "myaccount1"); + secureSettings.setString("azure.client.azure1.key", Base64.encode("mykey11".getBytes(StandardCharsets.UTF_8))); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + // reinit with empty settings + final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reinit(Settings.EMPTY)); assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); + // existing client untouched + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + // new client also untouched + final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); + assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); } } + public void testReinitClientWrongSettings() throws IOException { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + 
secureSettings1.setString("azure.client.azure1.account", "myaccount1"); + secureSettings1.setString("azure.client.azure1.key", Base64.encode("mykey11".getBytes(StandardCharsets.UTF_8))); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setString("azure.client.azure1.account", "myaccount1"); + // missing key + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + plugin.reinit(settings2); + // existing client untouched + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure1")); + assertThat(e.getMessage(), is("Invalid azure client [azure1] settings.")); + } + } + + public void testGetSelectedClientWithNoPrimaryAndSecondary() { + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(Settings.EMPTY)); + assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); + } + public void testGetSelectedClientNonExisting() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); - }); - assertThat(e.getMessage(), is("Can not find named azure client [azure4]. 
Check your settings.")); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); + assertThat(e.getMessage(), is("Cannot find an azure client by the name [azure4]. Check your settings.")); } public void testGetSelectedClientDefaultTimeout() { - Settings timeoutSettings = Settings.builder() + final Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); - CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); + final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); } public void testGetSelectedClientNoTimeout() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - AzureStorageServiceImpl azureStorageService = 
new AzureStorageServiceMockForSettings(buildSettings()); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); } public void testGetSelectedClientBackoffPolicyNbRetries() { - Settings timeoutSettings = Settings.builder() + final Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.max_retries", 7) .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); } public void testNoProxy() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + final AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); } 
public void testProxyHttp() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -183,7 +259,7 @@ public void testProxyHttp() throws UnknownHostException { } public void testMultipleProxies() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) @@ -192,12 +268,12 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); - Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy(); + final Proxy azure2Proxy = 
mock.storageSettings.get("azure2").getProxy(); assertThat(azure2Proxy, notNullValue()); assertThat(azure2Proxy.type(), is(Proxy.Type.HTTP)); assertThat(azure2Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081))); @@ -205,14 +281,14 @@ public void testMultipleProxies() throws UnknownHostException { } public void testProxySocks() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); @@ -221,47 +297,47 @@ public void testProxySocks() throws UnknownHostException { } public void testProxyNoHost() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoPort() { - Settings settings = 
Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoType() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } public void testProxyWrongHost() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .put("azure.client.azure1.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky") .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } @@ -270,14 +346,13 @@ public void testProxyWrongHost() { */ class AzureStorageServiceMockForSettings extends 
AzureStorageServiceImpl { AzureStorageServiceMockForSettings(Settings settings) { - super(settings, AzureStorageSettings.load(settings)); + super(settings); } // We fake the client here @Override - void createClient(AzureStorageSettings azureStorageSettings) { - this.clients.put(azureStorageSettings.getAccount(), - new CloudBlobClient(URI.create("https://" + azureStorageSettings.getAccount()))); + protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) { + return new CloudBlobClient(URI.create("https://" + azureStorageSettings.getAccount())); } } From e1a2d7e454c1b502d0225b360dc31aa3145e572e Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sun, 29 Apr 2018 16:36:30 +0300 Subject: [PATCH 07/21] Merge companion --- .../azure/AzureStorageServiceMock.java | 2 ++ .../s3/S3BlobStoreRepositoryTests.java | 23 ++++++++----------- .../repositories/s3/S3BlobStoreTests.java | 12 ++++++++-- .../repositories/s3/S3RepositoryTests.java | 15 ++++++++---- 4 files changed, 32 insertions(+), 20 deletions(-) diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index c6eb2ce975c8d..2de20c1babb82 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -72,6 +72,8 @@ public void createContainer(String account, String container) { @Override public void deleteFiles(String account, String container, String path) { + final Map blobs = listBlobsByPrefix(account, container, path, null); + blobs.keySet().forEach(key -> deleteBlob(account, container, key)); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index e3e89c41514de..b3ec92547680f 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.StorageClass; import org.elasticsearch.common.settings.Settings; @@ -39,7 +38,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import static java.util.Collections.emptyMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -73,11 +71,9 @@ public static void wipeRepository() { @Override protected void createTestRepository(final String name) { - assertAcked(client().admin().cluster().preparePutRepository(name) - .setType(S3Repository.TYPE) - .setSettings(Settings.builder() + assertAcked(client().admin().cluster().preparePutRepository(name).setType(S3Repository.TYPE).setSettings(Settings.builder() .put(S3Repository.BUCKET_SETTING.getKey(), bucket) - .put(InternalAwsS3Service.CLIENT_NAME.getKey(), client) + .put(S3Repository.CLIENT_NAME.getKey(), client) .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) @@ -97,13 +93,14 @@ public TestS3RepositoryPlugin(final Settings settings) { @Override public Map getRepositories(final Environment env, final NamedXContentRegistry registry) { - return Collections.singletonMap(S3Repository.TYPE, (metadata) -> - new S3Repository(metadata, env.settings(), registry, 
new InternalAwsS3Service(env.settings(), emptyMap()) { - @Override - public synchronized AmazonS3 client(final Settings repositorySettings) { - return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass); - } - })); + return Collections.singletonMap(S3Repository.TYPE, + (metadata) -> new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings()) { + @Override + public synchronized AmazonS3Reference client(String clientName) { + return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass)); + } + })); + } } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index 4a23e4efa9a29..a44946b6b3ffa 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -115,7 +115,15 @@ public static S3BlobStore randomMockS3BlobStore() { storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString(); } - AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); - return new S3BlobStore(Settings.EMPTY, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + final String theClientName = randomAlphaOfLength(4); + final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); + final AwsS3Service service = new InternalAwsS3Service(Settings.EMPTY) { + @Override + public synchronized AmazonS3Reference client(String clientName) { + assert theClientName.equals(clientName); + return new AmazonS3Reference(client); + } + }; + return new S3BlobStore(Settings.EMPTY, service, theClientName, bucket, serverSideEncryption, 
bufferSize, cannedACL, storageClass); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 98c814fbc2445..a70088f83ea44 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -89,11 +89,16 @@ public void testInvalidChunkBufferSizeSettings() throws IOException { final Settings s3 = bufferAndChunkSettings(5, 5); new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close(); // buffer < 5mb should fail - assertInvalidBuffer(4, 10, IllegalArgumentException.class, - "failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"); - // chunk > 5tb should fail - assertInvalidBuffer(5, 6000000, IllegalArgumentException.class, - "failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"); + final Settings s4 = bufferAndChunkSettings(4, 10); + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, + () -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) + .close()); + assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]")); + final Settings s5 = bufferAndChunkSettings(5, 6000000); + final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class, + () -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) + .close()); + assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]")); } private Settings bufferAndChunkSettings(long buffer, long chunk) { From 000c58586f596c24882772152a6c2d511b5d633f Mon 
Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Mon, 28 May 2018 12:20:00 +0300 Subject: [PATCH 08/21] GCS repo plugin update-able secure settings (#30688) --- .../gcs/GoogleCloudStorageBlobContainer.java | 1 - .../gcs/GoogleCloudStorageBlobStore.java | 123 +++++++++--------- .../gcs/GoogleCloudStoragePlugin.java | 33 +++-- .../gcs/GoogleCloudStorageRepository.java | 5 +- .../gcs/GoogleCloudStorageService.java | 71 ++++++++-- ...leCloudStorageBlobStoreContainerTests.java | 15 ++- ...eCloudStorageBlobStoreRepositoryTests.java | 20 ++- .../gcs/GoogleCloudStorageBlobStoreTests.java | 15 ++- .../gcs/GoogleCloudStorageServiceTests.java | 92 ++++++++++--- 9 files changed, 258 insertions(+), 117 deletions(-) diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 833539905103a..bc8db906c47bd 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.io.InputStream; -import java.nio.file.FileAlreadyExistsException; import java.util.Map; class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 83aafdde2b1ab..8257bf4242e42 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -65,18 +65,24 @@ class GoogleCloudStorageBlobStore extends 
AbstractComponent implements BlobStore // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload private static final int LARGE_BLOB_THRESHOLD_BYTE_SIZE = 5 * 1024 * 1024; - private final Storage storage; - private final String bucket; + private final String bucketName; + private final String clientName; + private final GoogleCloudStorageService storageService; - GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storage) { + GoogleCloudStorageBlobStore(Settings settings, String bucketName, String clientName, GoogleCloudStorageService storageService) { super(settings); - this.bucket = bucket; - this.storage = storage; - if (doesBucketExist(bucket) == false) { - throw new BlobStoreException("Bucket [" + bucket + "] does not exist"); + this.bucketName = bucketName; + this.clientName = clientName; + this.storageService = storageService; + if (doesBucketExist(bucketName) == false) { + throw new BlobStoreException("Bucket [" + bucketName + "] does not exist"); } } + private Storage client() throws IOException { + return storageService.client(clientName); + } + @Override public BlobContainer blobContainer(BlobPath path) { return new GoogleCloudStorageBlobContainer(path, this); @@ -92,14 +98,14 @@ public void close() { } /** - * Return true if the given bucket exists + * Return true iff the given bucket exists * * @param bucketName name of the bucket - * @return true if the bucket exists, false otherwise + * @return true iff the bucket exists */ boolean doesBucketExist(String bucketName) { try { - final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> storage.get(bucketName)); + final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> client().get(bucketName)); return bucket != null; } catch (final Exception e) { throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e); @@ -107,10 +113,9 @@ boolean doesBucketExist(String bucketName) { } /** - * List blobs in the bucket under the 
specified path. The path root is removed. + * List blobs in the specific bucket under the specified path. The path root is removed. * - * @param path - * base path of the blobs to list + * @param path base path of the blobs to list * @return a map of blob names and their metadata */ Map listBlobs(String path) throws IOException { @@ -118,20 +123,19 @@ Map listBlobs(String path) throws IOException { } /** - * List all blobs in the bucket which have a prefix + * List all blobs in the specific bucket with names prefixed * * @param path * base path of the blobs to list. This path is removed from the * names of the blobs returned. - * @param prefix - * prefix of the blobs to list. + * @param prefix prefix of the blobs to list. * @return a map of blob names and their metadata. */ Map listBlobsByPrefix(String path, String prefix) throws IOException { final String pathPrefix = buildKey(path, prefix); final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); SocketAccess.doPrivilegedVoidIOException(() -> { - storage.get(bucket).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { + client().get(bucketName).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { assert blob.getName().startsWith(path); final String suffixName = blob.getName().substring(path.length()); mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); @@ -141,26 +145,26 @@ Map listBlobsByPrefix(String path, String prefix) throws I } /** - * Returns true if the blob exists in the bucket + * Returns true if the blob exists in the specific bucket * * @param blobName name of the blob - * @return true if the blob exists, false otherwise + * @return true iff the blob exists */ boolean blobExists(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final Blob blob = 
SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); return blob != null; } /** - * Returns an {@link java.io.InputStream} for a given blob + * Returns an {@link java.io.InputStream} for the given blob name * * @param blobName name of the blob - * @return an InputStream + * @return the InputStream used to read the blob's content */ InputStream readBlob(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); if (blob == null) { throw new NoSuchFileException("Blob [" + blobName + "] does not exit"); } @@ -185,13 +189,13 @@ public void close() throws IOException { } /** - * Writes a blob in the bucket. + * Writes a blob in the specific bucket * * @param inputStream content of the blob to be written * @param blobSize expected size of the blob to be written */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - final BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build(); + final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) { writeBlobResumable(blobInfo, inputStream); } else { @@ -209,8 +213,8 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I */ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { try { - final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException( - () -> storage.writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); + final WriteChannel writeChannel = SocketAccess + .doPrivilegedIOException(() -> client().writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { @Override public boolean 
isOpen() { @@ -228,7 +232,7 @@ public int write(ByteBuffer src) throws IOException { return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); } })); - } catch (StorageException se) { + } catch (final StorageException se) { if (se.getCode() == HTTP_PRECON_FAILED) { throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } @@ -250,45 +254,43 @@ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); - SocketAccess.doPrivilegedVoidIOException( - () -> { - try { - storage.create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()); - } catch (StorageException se) { - if (se.getCode() == HTTP_PRECON_FAILED) { - throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); - } - throw se; - } - }); + try { + SocketAccess.doPrivilegedVoidIOException( + () -> client().create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist())); + } catch (final StorageException se) { + if (se.getCode() == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + } + throw se; + } } /** - * Deletes a blob in the bucket + * Deletes the blob from the specific bucket * * @param blobName name of the blob */ void deleteBlob(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final boolean deleted = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final boolean deleted = SocketAccess.doPrivilegedIOException(() -> client().delete(blobId)); if (deleted == false) { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } } /** - 
* Deletes multiple blobs in the bucket that have a given prefix + * Deletes multiple blobs from the specific bucket all of which have prefixed names * - * @param prefix prefix of the buckets to delete + * @param prefix prefix of the blobs to delete */ void deleteBlobsByPrefix(String prefix) throws IOException { deleteBlobs(listBlobsByPrefix("", prefix).keySet()); } /** - * Deletes multiple blobs in the given bucket (uses a batch request to perform this) + * Deletes multiple blobs from the specific bucket using a batch request * - * @param blobNames names of the bucket to delete + * @param blobNames names of the blobs to delete */ void deleteBlobs(Collection blobNames) throws IOException { if (blobNames.isEmpty()) { @@ -299,13 +301,13 @@ void deleteBlobs(Collection blobNames) throws IOException { deleteBlob(blobNames.iterator().next()); return; } - final List blobIdsToDelete = blobNames.stream().map(blobName -> BlobId.of(bucket, blobName)).collect(Collectors.toList()); - final List deletedStatuses = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobIdsToDelete)); + final List blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList()); + final List deletedStatuses = SocketAccess.doPrivilegedIOException(() -> client().delete(blobIdsToDelete)); assert blobIdsToDelete.size() == deletedStatuses.size(); boolean failed = false; for (int i = 0; i < blobIdsToDelete.size(); i++) { if (deletedStatuses.get(i) == false) { - logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucket); + logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucketName); failed = true; } } @@ -315,26 +317,27 @@ void deleteBlobs(Collection blobNames) throws IOException { } /** - * Moves a blob within the same bucket + * Moves a blob within the specific bucket * * @param sourceBlobName name of the blob to move * @param targetBlobName new name of the blob in the 
same bucket */ void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { - final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); - final BlobId targetBlobId = BlobId.of(bucket, targetBlobName); + final BlobId sourceBlobId = BlobId.of(bucketName, sourceBlobName); + final BlobId targetBlobId = BlobId.of(bucketName, targetBlobName); final CopyRequest request = CopyRequest.newBuilder() .setSource(sourceBlobId) .setTarget(targetBlobId) .build(); - SocketAccess.doPrivilegedVoidIOException(() -> { - // There's no atomic "move" in GCS so we need to copy and delete + // There's no atomic "move" in GCS so we need to copy and delete + final Storage storage = client(); + final boolean deleted = SocketAccess.doPrivilegedIOException(() -> { storage.copy(request).getResult(); - final boolean deleted = storage.delete(sourceBlobId); - if (deleted == false) { - throw new IOException("Failed to move source [" + sourceBlobName + "] to target [" + targetBlobName + "]"); - } + return storage.delete(sourceBlobId); }); + if (deleted == false) { + throw new IOException("Failed to move source [" + sourceBlobName + "] to target [" + targetBlobName + "]"); + } } private static String buildKey(String keyPath, String s) { diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 1d2d70584adf9..bea572006a6d7 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -24,35 +24,34 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReInitializablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; 
import org.elasticsearch.repositories.Repository; - import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; -public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin { +public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { - private final Map clientsSettings; + // package-private for tests + final GoogleCloudStorageService storageService; public GoogleCloudStoragePlugin(final Settings settings) { - clientsSettings = GoogleCloudStorageClientSettings.load(settings); - } - - protected Map getClientsSettings() { - return clientsSettings; + this.storageService = createStorageService(settings); + // eagerly load client settings so that secure settings are readable (not closed) + reinit(settings); } // overridable for tests - protected GoogleCloudStorageService createStorageService(Environment environment) { - return new GoogleCloudStorageService(environment, clientsSettings); + protected GoogleCloudStorageService createStorageService(Settings settings) { + return new GoogleCloudStorageService(settings); } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, - (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, createStorageService(env))); + (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, this.storageService)); } @Override @@ -66,4 +65,16 @@ public List> getSettings() { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, GoogleCloudStorageClientSettings.TOKEN_URI_SETTING); } + + @Override + public boolean reinit(Settings settings) { + // Secure settings should be readable inside this method. Duplicate client + // settings in a format (`GoogleCloudStorageClientSettings`) that does not + // require for the `SecureSettings` to be open. 
Pass that around (the + // `GoogleCloudStorageClientSettings` instance) instead of the `Settings` + // instance. + final Map clientsSettings = GoogleCloudStorageClientSettings.load(settings); + this.storageService.updateClientsSettings(clientsSettings); + return true; + } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 976befae0a269..83d48eeda20aa 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -38,8 +38,6 @@ import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.simpleString; -import com.google.cloud.storage.Storage; - class GoogleCloudStorageRepository extends BlobStoreRepository { // package private for testing @@ -86,8 +84,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); - Storage client = SocketAccess.doPrivilegedIOException(() -> storageService.createClient(clientName)); - this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client); + this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 57bcc4b131356..6aea5d20364bf 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ 
b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -28,12 +28,12 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; - import java.io.IOException; import java.net.HttpURLConnection; import java.net.URI; @@ -41,28 +41,78 @@ import java.net.URL; import java.util.Map; +import static java.util.Collections.emptyMap; + public class GoogleCloudStorageService extends AbstractComponent { /** Clients settings identified by client name. */ - private final Map clientsSettings; + private volatile Map clientsSettings = emptyMap(); + /** Cache of client instances. Client instances are built once for each setting change. */ + private volatile Map clientsCache = emptyMap(); + + public GoogleCloudStorageService(final Settings settings) { + super(settings); + } - public GoogleCloudStorageService(final Environment environment, final Map clientsSettings) { - super(environment.settings()); - this.clientsSettings = clientsSettings; + /** + * Updates the client settings and clears the client cache. Subsequent calls to + * {@code GoogleCloudStorageService#client} will return new clients constructed + * using these passed settings. 
+ * + * @param clientsSettings the new settings used for building clients for subsequent requests + * @return previous settings which have been substituted + */ + public synchronized Map + updateClientsSettings(Map clientsSettings) { + final Map prevSettings = this.clientsSettings; + this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + this.clientsCache = emptyMap(); + // clients are built lazily by {@link client(String)} + return prevSettings; + } + + /** + * Attempts to retrieve a client from the cache. If the client does not exist it + * will be created from the latest settings and will populate the cache. The + * returned instance should not be cached by the calling code. Instead, for each + * use, the (possibly updated) instance should be requested by calling this + * method. + * + * @param clientName name of the client settings used to create the client + * @return a cached client storage instance that can be used to manage objects + * (blobs) + */ + public Storage client(final String clientName) throws IOException { + Storage storage = clientsCache.get(clientName); + if (storage != null) { + return storage; + } + synchronized (this) { + storage = clientsCache.get(clientName); + if (storage != null) { + return storage; + } + storage = SocketAccess.doPrivilegedIOException(() -> createClient(clientName)); + clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, storage).immutableMap(); + return storage; + } } /** - * Creates a client that can be used to manage Google Cloud Storage objects. + * Creates a client that can be used to manage Google Cloud Storage objects. The client is thread-safe. 
* * @param clientName name of client settings to use, including secure settings - * @return a Client instance that can be used to manage Storage objects + * @return a new client storage instance that can be used to manage objects + * (blobs) */ - public Storage createClient(final String clientName) throws Exception { + private Storage createClient(final String clientName) throws Exception { final GoogleCloudStorageClientSettings clientSettings = clientsSettings.get(clientName); if (clientSettings == null) { throw new IllegalArgumentException("Unknown client name [" + clientName + "]. Existing client configs: " + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); } + logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, + clientSettings.getHost())); final HttpTransport httpTransport = createHttpTransport(clientSettings.getHost()); final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder() .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) @@ -114,6 +164,9 @@ private static HttpTransport createHttpTransport(final String endpoint) throws E builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); if (Strings.hasLength(endpoint)) { final URL endpointUrl = URI.create(endpoint).toURL(); + // it is crucial to open a connection for each URL (see {@code + // DefaultConnectionFactory#openConnection}) instead of reusing connections, + // because the storage instance has to be thread-safe as it is cached. 
builder.setConnectionFactory(new DefaultConnectionFactory() { @Override public HttpURLConnection openConnection(final URL originalUrl) throws IOException { diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 27736e24dbf51..0cc1243f28311 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -26,11 +26,22 @@ import java.util.Locale; import java.util.concurrent.ConcurrentHashMap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override protected BlobStore newBlobStore() { - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); + final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); + try { + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + } catch (final Exception e) { + throw new RuntimeException(e); + } + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index c4d9b67899672..3692b26f2bbb7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -24,14 +24,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.junit.AfterClass; import java.util.Collection; import java.util.Collections; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -73,19 +71,19 @@ public MockGoogleCloudStoragePlugin(final Settings settings) { } @Override - protected GoogleCloudStorageService createStorageService(Environment environment) { - return new MockGoogleCloudStorageService(environment, getClientsSettings()); + protected GoogleCloudStorageService createStorageService(Settings settings) { + return new MockGoogleCloudStorageService(settings); } } public static class MockGoogleCloudStorageService extends GoogleCloudStorageService { - MockGoogleCloudStorageService(Environment environment, Map clientsSettings) { - super(environment, clientsSettings); + MockGoogleCloudStorageService(Settings settings) { + super(settings); } @Override - public Storage createClient(String clientName) { + public Storage client(String clientName) { return new MockStorage(BUCKET, blobs); } } @@ -97,7 +95,7 @@ public void testChunkSize() { assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize); // chunk size in settings - int size = randomIntBetween(1, 100); + final int size = 
randomIntBetween(1, 100); repositoryMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", size + "mb").build()); chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData); @@ -105,7 +103,7 @@ public void testChunkSize() { // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "0").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); @@ -113,7 +111,7 @@ public void testChunkSize() { // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "-1").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); @@ -121,7 +119,7 @@ public void testChunkSize() { // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "101mb").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 5e25307805235..4634bd3274a70 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -26,11 +26,22 @@ import java.util.Locale; import java.util.concurrent.ConcurrentHashMap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { @Override protected BlobStore newBlobStore() { - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); + final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); + try { + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + } catch (final Exception e) { + throw new RuntimeException(e); + } + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index a33ae90c549bc..a85cd118175ad 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -23,28 +23,36 @@ import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.util.Collections; + +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.util.Base64; import java.util.Locale; +import java.util.UUID; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.containsString; public class GoogleCloudStorageServiceTests extends ESTestCase { public void testClientInitializer() throws Exception { - final String clientName = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); - final Environment environment = mock(Environment.class); + final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final String applicationName = randomAlphaOfLength(4); - final String hostName = randomFrom("http://", "https://") + randomAlphaOfLength(4) + ":" + randomIntBetween(1, 65535); - final String projectIdName = randomAlphaOfLength(4); + final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String endpoint = 
randomFrom("http://", "https://") + + randomFrom("www.elastic.co", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), connectTimeValue.getStringRep()) @@ -52,20 +60,18 @@ public void testClientInitializer() throws Exception { readTimeValue.getStringRep()) .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), applicationName) - .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), hostName) + .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - when(environment.settings()).thenReturn(settings); - final GoogleCloudStorageClientSettings clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName); - final GoogleCloudStorageService service = new GoogleCloudStorageService(environment, - Collections.singletonMap(clientName, clientSettings)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.createClient("another_client")); + final GoogleCloudStorageService service = new GoogleCloudStorageService(settings); + service.updateClientsSettings(GoogleCloudStorageClientSettings.load(settings)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.client("another_client")); assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( new Setting[] { 
GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); - final Storage storage = service.createClient(clientName); + final Storage storage = service.client(clientName); assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); - assertThat(storage.getOptions().getHost(), Matchers.is(hostName)); + assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), @@ -75,6 +81,58 @@ public void testClientInitializer() throws Exception { assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); } + public void testReinitClientSettings() throws Exception { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs11")); + secureSettings1.setFile("gcs.client.gcs2.credentials_file", serviceAccountFileContent("project_gcs12")); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs21")); + secureSettings2.setFile("gcs.client.gcs3.credentials_file", serviceAccountFileContent("project_gcs23")); + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try (GoogleCloudStoragePlugin plugin = new GoogleCloudStoragePlugin(settings1)) { + final GoogleCloudStorageService storageService = plugin.storageService; + final Storage client11 = storageService.client("gcs1"); + assertThat(client11.getOptions().getProjectId(), 
equalTo("project_gcs11")); + final Storage client12 = storageService.client("gcs2"); + assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + // client 3 is missing + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs3")); + assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); + // update client settings + plugin.reinit(settings2); + // old client 1 not changed + assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + // new client 1 is changed + final Storage client21 = storageService.client("gcs1"); + assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); + // old client 2 not changed + assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + // new client2 is gone + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs2")); + assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); + // client 3 emerged + final Storage client23 = storageService.client("gcs3"); + assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); + } + } + + private byte[] serviceAccountFileContent(String projectId) throws Exception { + final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(1024); + final KeyPair keyPair = keyPairGenerator.generateKeyPair(); + final String encodedKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); + final XContentBuilder serviceAccountBuilder = jsonBuilder().startObject() + .field("type", "service_account") + .field("project_id", projectId) + .field("private_key_id", UUID.randomUUID().toString()) + .field("private_key", "-----BEGIN PRIVATE KEY-----\n" + encodedKey + "\n-----END PRIVATE KEY-----\n") + .field("client_email", "integration_test@appspot.gserviceaccount.com") + .field("client_id", "client_id") + 
.endObject(); + return BytesReference.toBytes(BytesReference.bytes(serviceAccountBuilder)); + } + public void testToTimeout() { assertEquals(-1, GoogleCloudStorageService.toTimeout(null).intValue()); assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); From c4ebd6f4595a9715a8eb598d29b021b70d547621 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 5 Jun 2018 11:43:38 +0300 Subject: [PATCH 09/21] Renames and refactoring for reloadable plugins (#30992) --- .../discovery/ec2/AwsEc2Service.java | 27 ++-- .../discovery/ec2/AwsEc2ServiceImpl.java | 16 +- .../discovery/ec2/Ec2DiscoveryPlugin.java | 13 +- .../ec2/Ec2DiscoveryPluginTests.java | 2 +- .../repositories/azure/AzureBlobStore.java | 4 +- .../azure/AzureRepositoryPlugin.java | 9 +- .../azure/AzureStorageService.java | 9 +- .../azure/AzureStorageServiceImpl.java | 4 +- .../azure/AzureStorageSettings.java | 2 +- .../azure/AzureStorageServiceMock.java | 2 +- .../azure/AzureStorageServiceTests.java | 6 +- .../gcs/GoogleCloudStoragePlugin.java | 11 +- .../gcs/GoogleCloudStorageService.java | 4 +- .../gcs/GoogleCloudStorageServiceTests.java | 4 +- .../repositories/s3/AwsS3Service.java | 23 ++- .../repositories/s3/InternalAwsS3Service.java | 19 ++- .../repositories/s3/S3BlobStore.java | 5 +- .../repositories/s3/S3Repository.java | 4 +- .../repositories/s3/S3RepositoryPlugin.java | 15 +- .../s3/RepositoryCredentialsTests.java | 2 +- .../repositories/s3/S3RepositoryTests.java | 4 +- .../repositories/s3/TestAwsS3Service.java | 2 +- .../elasticsearch/action/ActionModule.java | 10 +- .../NodesReloadSecureSettingsAction.java} | 19 +-- .../NodesReloadSecureSettingsRequest.java} | 41 ++--- ...esReloadSecureSettingsRequestBuilder.java} | 18 +-- .../NodesReloadSecureSettingsResponse.java} | 71 +++++++-- ...nsportNodesReloadSecureSettingsAction.java | 144 ++++++++++++++++++ .../reinit/TransportNodesReInitAction.java | 128 ---------------- .../client/ClusterAdminClient.java | 4 +- 
.../client/support/AbstractClient.java | 8 +- .../org/elasticsearch/plugins/Plugin.java | 1 + .../plugins/ReInitializablePlugin.java | 26 ---- .../plugins/ReloadablePlugin.java | 54 +++++++ ...va => RestReloadSecureSettingsAction.java} | 32 ++-- 35 files changed, 420 insertions(+), 323 deletions(-) rename server/src/main/java/org/elasticsearch/action/admin/cluster/{reinit/NodesReInitAction.java => node/reload/NodesReloadSecureSettingsAction.java} (55%) rename server/src/main/java/org/elasticsearch/action/admin/cluster/{reinit/NodesReInitRequest.java => node/reload/NodesReloadSecureSettingsRequest.java} (54%) rename server/src/main/java/org/elasticsearch/action/admin/cluster/{reinit/NodesReInitRequestBuilder.java => node/reload/NodesReloadSecureSettingsRequestBuilder.java} (62%) rename server/src/main/java/org/elasticsearch/action/admin/cluster/{reinit/NodesReInitResponse.java => node/reload/NodesReloadSecureSettingsResponse.java} (52%) create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java delete mode 100644 server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java create mode 100644 server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java rename server/src/main/java/org/elasticsearch/rest/action/admin/cluster/{RestReInitAction.java => RestReloadSecureSettingsAction.java} (68%) diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java index 9765ce6e1bdfc..c001e35ad1ee6 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java @@ -22,12 +22,14 @@ import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; + +import java.io.Closeable; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.function.Function; -interface AwsEc2Service { +interface AwsEc2Service extends Closeable { Setting AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, Property.NodeScope); class HostType { @@ -79,25 +81,20 @@ class HostType { key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)); /** - * Creates then caches an {@code AmazonEC2} client using the current client - * settings. + * Builds then caches an {@code AmazonEC2} client using the current client + * settings. Returns an {@code AmazonEc2Reference} wrapper which should be + * released as soon as it is not required anymore. */ AmazonEc2Reference client(); /** - * Updates settings for building the client. Future client requests will use the - * new settings. Implementations SHOULD drop the client cache to prevent reusing - * the client with old settings from cache. + * Updates the settings for building the client and releases the cached one. + * Future client requests will use the new settings to lazily build the new + * client. * - * @param clientSettings - * the new settings - * @return the old settings + * @param clientSettings the new refreshed settings + * @return the old stale settings */ - Ec2ClientSettings updateClientSettings(Ec2ClientSettings clientSettings); + Ec2ClientSettings refreshAndClearCache(Ec2ClientSettings clientSettings); - /** - * Releases the cached client. Subsequent client requests will recreate the - * client instance. Does not touch the client settings.
- */ - void releaseCachedClient(); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 8d31ac213534e..d3aaa153711e4 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.ec2; +import java.io.IOException; import java.util.Random; import com.amazonaws.ClientConfiguration; @@ -127,12 +128,12 @@ public AmazonEc2Reference client() { /** - * Reloads the settings for the AmazonEC2 client. New clients will be build - * using these. Old client is usable until released. On release it will be - * destroyed instead of being returned to the cache. + * Refreshes the settings for the AmazonEC2 client. New clients will be built + * using these new settings. Old client is usable until released. On release it + * will be destroyed instead of being returned to the cache.
*/ @Override - public synchronized Ec2ClientSettings updateClientSettings(Ec2ClientSettings clientSettings) { + public synchronized Ec2ClientSettings refreshAndClearCache(Ec2ClientSettings clientSettings) { // shutdown all unused clients // others will shutdown on their respective release releaseCachedClient(); @@ -142,7 +143,11 @@ public synchronized Ec2ClientSettings updateClientSettings(Ec2ClientSettings cli } @Override - public synchronized void releaseCachedClient() { + public void close() { + releaseCachedClient(); + } + + private synchronized void releaseCachedClient() { if (this.clientReference == null) { return; } @@ -154,4 +159,5 @@ public synchronized void releaseCachedClient() { // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } + } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 4b86263b9ad55..9fc32ea306c0e 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -31,7 +31,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReInitializablePlugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.transport.TransportService; import java.io.BufferedReader; @@ -50,7 +50,7 @@ import java.util.Map; import java.util.function.Supplier; -public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReInitializablePlugin { +public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReloadablePlugin { private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); public static final String EC2 = "ec2"; @@ -85,7 +85,7 @@ protected 
Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { this.settings = settings; this.ec2Service = ec2Service; // eagerly load client settings when secure settings are accessible - reinit(settings); + reload(settings); } @Override @@ -172,14 +172,13 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe @Override public void close() throws IOException { - ec2Service.releaseCachedClient(); + ec2Service.close(); } @Override - public boolean reinit(Settings settings) { + public void reload(Settings settings) { // secure settings should be readable final Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings); - ec2Service.updateClientSettings(clientSettings); - return true; + ec2Service.refreshAndClearCache(clientSettings); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 87754cc8f9af6..5db5bf84ab936 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -135,7 +135,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); // reload secure settings2 - plugin.reinit(settings2); + plugin.reload(settings2); // client is not released, it is still using the old settings assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 56ae5b6af31ba..8384ff5943f5d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -54,9 +54,9 @@ public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStora this.service = service; // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - final Map prevSettings = this.service.updateClientsSettings(emptyMap()); + final Map prevSettings = this.service.refreshAndClearCache(emptyMap()); final Map newSettings = AzureStorageSettings.overrideLocationMode(prevSettings, this.locationMode); - this.service.updateClientsSettings(newSettings); + this.service.refreshAndClearCache(newSettings); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index eb92fd198c570..1c53422e1902b 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReInitializablePlugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; import java.util.Arrays; @@ -35,7 +35,7 @@ /** * A plugin to add a repository type 
that writes to and from the Azure cloud storage service. */ -public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { +public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { // protected for testing final AzureStorageService azureStoreService; @@ -65,10 +65,9 @@ public List> getSettings() { } @Override - public boolean reinit(Settings settings) { + public void reload(Settings settings) { // secure settings should be readable final Map clientsSettings = AzureStorageSettings.load(settings); - azureStoreService.updateClientsSettings(clientsSettings); - return true; + azureStoreService.refreshAndClearCache(clientsSettings); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index dd832bcb80de1..d0a167993f868 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -50,14 +50,13 @@ public interface AzureStorageService { Tuple> client(String clientName); /** - * Updates settings for building clients. Future client requests will use the - * new settings. + * Updates settings for building clients. Any client cache is cleared. Future + * client requests will use the new refreshed settings. 
* - * @param clientsSettings - * the new settings + * @param clientsSettings the settings for new clients * @return the old settings */ - Map updateClientsSettings(Map clientsSettings); + Map refreshAndClearCache(Map clientsSettings); ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index 734ba4f7fa1af..dbd2fab64da86 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -61,7 +61,7 @@ public AzureStorageServiceImpl(Settings settings) { super(settings); // eagerly load client settings so that secure settings are read final Map clientsSettings = AzureStorageSettings.load(settings); - updateClientsSettings(clientsSettings); + refreshAndClearCache(clientsSettings); } @Override @@ -107,7 +107,7 @@ protected OperationContext buildOperationContext(AzureStorageSettings azureStora } @Override - public Map updateClientsSettings(Map clientsSettings) { + public Map refreshAndClearCache(Map clientsSettings) { final Map prevSettings = this.storageSettings; this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); // clients are built lazily by {@link client(String)} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 6423fc1ce3c17..42676f56bf70c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ 
b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -185,7 +185,7 @@ public String toString() { } /** - * Parses settings and read all settings available under azure.client.* + * Parse and read all settings available under the azure.client.* namespace * @param settings settings to parse * @return All the named configurations */ diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 2de20c1babb82..880b921afe55a 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -212,7 +212,7 @@ public Tuple> client(String clientNa } @Override - public Map updateClientsSettings(Map clientsSettings) { + public Map refreshAndClearCache(Map clientsSettings) { return emptyMap(); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 391f9295751e2..3dc943df1c2db 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -95,7 +95,7 @@ public void testReinitClientSettings() throws IOException { final SettingsException e1 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure3")); assertThat(e1.getMessage(), is("Unable to find client with name [azure3]")); // update client settings - plugin.reinit(settings2); + plugin.reload(settings2); // old client 1 not changed assertThat(client11.getEndpoint().toString(), 
equalTo("https://myaccount11.blob.core.windows.net")); // new client 1 is changed @@ -122,7 +122,7 @@ public void testReinitClientEmptySettings() throws IOException { final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); // reinit with empty settings - final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reinit(Settings.EMPTY)); + final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reload(Settings.EMPTY)); assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); // existing client untouched assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -145,7 +145,7 @@ public void testReinitClientWrongSettings() throws IOException { final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); - plugin.reinit(settings2); + plugin.reload(settings2); // existing client untouched assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure1")); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index bea572006a6d7..12e7fd26ff565 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -24,7 +24,7 @@ import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReInitializablePlugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; import java.util.Arrays; @@ -32,7 +32,7 @@ import java.util.List; import java.util.Map; -public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { +public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { // package-private for tests final GoogleCloudStorageService storageService; @@ -40,7 +40,7 @@ public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin public GoogleCloudStoragePlugin(final Settings settings) { this.storageService = createStorageService(settings); // eagerly load client settings so that secure settings are readable (not closed) - reinit(settings); + reload(settings); } // overridable for tests @@ -67,14 +67,13 @@ public List> getSettings() { } @Override - public boolean reinit(Settings settings) { + public void reload(Settings settings) { // Secure settings should be readable inside this method. Duplicate client // settings in a format (`GoogleCloudStorageClientSettings`) that does not // require for the `SecureSettings` to be open. Pass that around (the // `GoogleCloudStorageClientSettings` instance) instead of the `Settings` // instance. 
final Map clientsSettings = GoogleCloudStorageClientSettings.load(settings); - this.storageService.updateClientsSettings(clientsSettings); - return true; + this.storageService.refreshAndClearCache(clientsSettings); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 6aea5d20364bf..9fe78dfb9970b 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -55,7 +55,7 @@ public GoogleCloudStorageService(final Settings settings) { } /** - * Updates the client settings and clears the client cache. Subsequent calls to + * Refreshes the client settings and clears the client cache. Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed * using these passed settings. 
* @@ -63,7 +63,7 @@ public GoogleCloudStorageService(final Settings settings) { * @return previous settings which have been substituted */ public synchronized Map - updateClientsSettings(Map clientsSettings) { + refreshAndClearCache(Map clientsSettings) { final Map prevSettings = this.clientsSettings; this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); this.clientsCache = emptyMap(); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index a85cd118175ad..0130d2c576cd5 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -64,7 +64,7 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); final GoogleCloudStorageService service = new GoogleCloudStorageService(settings); - service.updateClientsSettings(GoogleCloudStorageClientSettings.load(settings)); + service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.client("another_client")); assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( @@ -100,7 +100,7 @@ public void testReinitClientSettings() throws Exception { final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs3")); assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); // update client settings - plugin.reinit(settings2); + plugin.reload(settings2); // old client 1 not changed 
assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); // new client 1 is changed diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index 38e39747de7fa..03b06c5b1bd34 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -19,30 +19,25 @@ package org.elasticsearch.repositories.s3; +import java.io.Closeable; import java.util.Map; -interface AwsS3Service { +interface AwsS3Service extends Closeable { /** * Creates then caches an {@code AmazonS3} client using the current client - * settings. + * settings. Returns an {@code AmazonS3Reference} wrapper which has to be + * released as soon as it is not needed anymore. */ AmazonS3Reference client(String clientName); /** - * Updates settings for building clients. Future client requests will use the - * new settings. Implementations SHOULD drop the client cache to prevent reusing - * clients with old settings from cache. + * Updates settings for building clients and clears the client cache. Future + * client requests will use the new settings to lazily build new clients. * - * @param clientsSettings - * the new settings - * @return the old settings + * @param clientsSettings the new refreshed settings + * @return the old stale settings */ - Map updateClientsSettings(Map clientsSettings); + Map refreshAndClearCache(Map clientsSettings); - /** - * Releases cached clients. Subsequent client requests will recreate client - * instances. Does not touch the client settings. 
- */ - void releaseCachedClients(); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index 381b72bdf950c..a54320f1fbd19 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; import java.util.Map; import static java.util.Collections.emptyMap; @@ -48,12 +50,13 @@ class InternalAwsS3Service extends AbstractComponent implements AwsS3Service { } /** - * Reloads the settings for the AmazonS3 client. New clients will be build using - * these. Old clients are usable until released. On release they will be - * destroyed contrary to being returned to the cache. + * Refreshes the settings for the AmazonS3 clients and clears the cache of + * existing clients. New clients will be build using these new settings. Old + * clients are usable until released. On release they will be destroyed instead + * to being returned to the cache. 
*/ @Override - public synchronized Map updateClientsSettings(Map clientsSettings) { + public synchronized Map refreshAndClearCache(Map clientsSettings) { // shutdown all unused clients // others will shutdown on their respective release releaseCachedClients(); @@ -142,8 +145,7 @@ static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c } } - @Override - public synchronized void releaseCachedClients() { + protected synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); @@ -173,4 +175,9 @@ public void refresh() { } } + @Override + public void close() throws IOException { + releaseCachedClients(); + } + } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index c0d89c0f8fd01..c0f61e4d07828 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import java.io.IOException; import java.util.ArrayList; import java.util.Locale; @@ -150,8 +151,8 @@ public void delete(BlobPath path) { } @Override - public void close() { - this.service.releaseCachedClients(); + public void close() throws IOException { + this.service.close(); } public CannedAccessControlList getCannedACL() { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index c021ed063d8a5..9f984c4b5e362 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -192,9 +192,9 @@ class S3Repository extends BlobStoreRepository { + "store these in named clients and the elasticsearch keystore for secure settings."); final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); // hack, but that's ok because the whole if branch should be axed - final Map prevSettings = awsService.updateClientsSettings(S3ClientSettings.load(Settings.EMPTY)); + final Map prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY)); final Map newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials); - awsService.updateClientsSettings(newSettings); + awsService.refreshAndClearCache(newSettings); } blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 91b4a7863f805..93561c94d2b9a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -35,14 +35,14 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReInitializablePlugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; /** * A plugin to add a repository type that writes to and from the AWS S3. 
*/ -public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReInitializablePlugin { +public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { static { SpecialPermission.check(); @@ -66,7 +66,7 @@ public S3RepositoryPlugin(Settings settings) { this.awsS3Service = getAwsS3Service(settings); // eagerly load client settings so that secure settings are read final Map clientsSettings = S3ClientSettings.load(settings); - this.awsS3Service.updateClientsSettings(clientsSettings); + this.awsS3Service.refreshAndClearCache(clientsSettings); } protected S3RepositoryPlugin(AwsS3Service awsS3Service) { @@ -109,15 +109,14 @@ public List> getSettings() { } @Override - public boolean reinit(Settings settings) { + public void reload(Settings settings) { // secure settings should be readable final Map clientsSettings = S3ClientSettings.load(settings); - awsS3Service.updateClientsSettings(clientsSettings); - return true; + awsS3Service.refreshAndClearCache(clientsSettings); } @Override - public void close() { - awsS3Service.releaseCachedClients(); + public void close() throws IOException { + awsS3Service.close(); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index c42403503e0c8..f3bd894977999 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -179,7 +179,7 @@ public void testReinitSecureCredentials() throws IOException { newSecureSettings.setString("s3.client." 
+ clientName + ".secret_key", "new_secret_aws_secret"); final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build(); // reload S3 plugin settings - s3Plugin.reinit(newSettings); + s3Plugin.reload(newSettings); // check the not-yet-closed client reference still has the same credentials if (repositorySettings) { assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index a70088f83ea44..5c0aada66585c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -67,12 +67,12 @@ public AmazonS3Reference client(String clientName) { } @Override - public Map updateClientsSettings(Map clientsSettings) { + public Map refreshAndClearCache(Map clientsSettings) { return Collections.emptyMap(); } @Override - public void releaseCachedClients() { + public void close() { } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java index 85a11d722cbe7..f376f73820624 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java @@ -52,7 +52,7 @@ private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) { } @Override - public synchronized void releaseCachedClients() { + protected synchronized void releaseCachedClients() { super.releaseCachedClients(); clients.clear(); } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java 
index fa3a3ff612bf6..652a58196e271 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -29,6 +29,8 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; @@ -39,8 +41,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; -import org.elasticsearch.action.admin.cluster.reinit.TransportNodesReInitAction; import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -239,7 +239,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction; -import org.elasticsearch.rest.action.admin.cluster.RestReInitAction; +import org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; import 
org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; @@ -496,7 +496,7 @@ public void reg actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); - actions.register(NodesReInitAction.INSTANCE, TransportNodesReInitAction.class); + actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); //Indexed scripts actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); @@ -619,7 +619,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRecoveryAction(settings, restController)); - registerHandler.accept(new RestReInitAction(settings, restController)); + registerHandler.accept(new RestReloadSecureSettingsAction(settings, restController)); // Scripts API registerHandler.accept(new RestGetStoredScriptAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java similarity index 55% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index 0bfe3d08604af..705756e6a628b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -17,27 +17,28 @@ * under the License. 
*/ -package org.elasticsearch.action.admin.cluster.reinit; +package org.elasticsearch.action.admin.cluster.node.reload; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -public class NodesReInitAction extends Action { +public class NodesReloadSecureSettingsAction + extends Action { - public static final NodesReInitAction INSTANCE = new NodesReInitAction(); - public static final String NAME = "cluster:admin/reinit"; + public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); + public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; - private NodesReInitAction() { + private NodesReloadSecureSettingsAction() { super(NAME); } @Override - public NodesReInitResponse newResponse() { - return new NodesReInitResponse(); + public NodesReloadSecureSettingsResponse newResponse() { + return new NodesReloadSecureSettingsResponse(); } @Override - public NodesReInitRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new NodesReInitRequestBuilder(client, this); + public NodesReloadSecureSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new NodesReloadSecureSettingsRequestBuilder(client, this); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java similarity index 54% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 18b2bc6792017..e3a9229893ed2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -17,62 +17,69 @@ * 
under the License. */ -package org.elasticsearch.action.admin.cluster.reinit; +package org.elasticsearch.action.admin.cluster.node.reload; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; - import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * Request for an update cluster settings action + * Request for a reload secure settings action */ -public class NodesReInitRequest extends BaseNodesRequest { +public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { - private String secureStorePassword; + /** + * The password which is broadcasted to all nodes, but is never stored on + * persistent storage. The password is used to reread and decrypt the contents + * of the node's keystore (backing the implementation of + * {@code SecureSettings}). + */ + private String secureSettingsPassword; - public NodesReInitRequest() { + public NodesReloadSecureSettingsRequest() { } /** - * Get usage from nodes based on the nodes ids specified. If none are - * passed, usage for all nodes will be returned. + * Reload secure settings only on certain nodes, based on the nodes ids + * specified. If none are passed, secure settings will be reloaded on all the + * nodes. */ - public NodesReInitRequest(String... nodesIds) { + public NodesReloadSecureSettingsRequest(String... 
nodesIds) { super(nodesIds); } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (secureStorePassword == null) { - validationException = addValidationError("secure store password cannot be null (use empty string).", validationException); + if (secureSettingsPassword == null) { + validationException = addValidationError("secure settings password cannot be null (use empty string instead)", + validationException); } return validationException; } - public String secureStorePassword() { - return secureStorePassword; + public String secureSettingsPassword() { + return secureSettingsPassword; } - public NodesReInitRequest secureStorePassword(String secureStorePassword) { - this.secureStorePassword = secureStorePassword; + public NodesReloadSecureSettingsRequest secureStorePassword(String secureStorePassword) { + this.secureSettingsPassword = secureStorePassword; return this; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - secureStorePassword = in.readString(); + secureSettingsPassword = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(secureStorePassword); + out.writeString(secureSettingsPassword); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java similarity index 62% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index 95c5eef90abc8..fbf0d86d7c52c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -17,26 +17,22 @@ * under the License. */ -package org.elasticsearch.action.admin.cluster.reinit; +package org.elasticsearch.action.admin.cluster.node.reload; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; - /** - * Builder for a cluster update settings request + * Builder for the reload secure settings nodes request */ -public class NodesReInitRequestBuilder - extends NodesOperationRequestBuilder { +public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder { - public NodesReInitRequestBuilder(ElasticsearchClient client, NodesReInitAction action) { - super(client, action, new NodesReInitRequest()); + public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) { + super(client, action, new NodesReloadSecureSettingsRequest()); } - /** - * Sets the transient settings to be updated. 
They will not survive a full cluster restart - */ - public NodesReInitRequestBuilder setSecureStorePassword(String secureStorePassword) { + public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(String secureStorePassword) { request.secureStorePassword(secureStorePassword); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java similarity index 52% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java index 3386906f83333..394b1f10dc2d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/NodesReInitResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java @@ -17,8 +17,9 @@ * under the License. */ -package org.elasticsearch.action.admin.cluster.reinit; +package org.elasticsearch.action.admin.cluster.node.reload; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; @@ -30,42 +31,47 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; - import java.io.IOException; import java.util.List; /** - * A response for a cluster update settings action. 
+ * The response for the reload secure settings action */ -public class NodesReInitResponse extends BaseNodesResponse implements ToXContentFragment { +public class NodesReloadSecureSettingsResponse extends BaseNodesResponse + implements ToXContentFragment { - public NodesReInitResponse() { + public NodesReloadSecureSettingsResponse() { } - public NodesReInitResponse(ClusterName clusterName, List nodes, List failures) { + public NodesReloadSecureSettingsResponse(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @Override - protected List readNodesFrom(StreamInput in) throws IOException { + protected List readNodesFrom(StreamInput in) throws IOException { return in.readList(NodeResponse::readNodeResponse); } @Override - protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { out.writeStreamableList(nodes); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("nodes"); - for (final NodesReInitResponse.NodeResponse node : getNodes()) { + for (final NodesReloadSecureSettingsResponse.NodeResponse node : getNodes()) { builder.startObject(node.getNode().getId()); builder.field("name", node.getNode().getName()); + final Exception e = node.reloadException(); + if (e != null) { + builder.startObject("reload_exception"); + ElasticsearchException.generateThrowableXContent(builder, params, e); + builder.endObject(); + } builder.endObject(); } builder.endObject(); - return builder; } @@ -84,11 +90,54 @@ public String toString() { public static class NodeResponse extends BaseNodeResponse { + private Exception reloadException = null; + public NodeResponse() { } - public NodeResponse(DiscoveryNode node) { + public NodeResponse(DiscoveryNode node, Exception reloadException) { super(node); + this.reloadException = reloadException; + } + + public Exception 
reloadException() { + return this.reloadException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.readBoolean()) { + reloadException = in.readException(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (reloadException != null) { + out.writeBoolean(true); + out.writeException(reloadException); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final NodesReloadSecureSettingsResponse.NodeResponse that = (NodesReloadSecureSettingsResponse.NodeResponse) o; + return reloadException != null ? reloadException.equals(that.reloadException) : that.reloadException == null; + } + + @Override + public int hashCode() { + return reloadException != null ? reloadException.hashCode() : 0; } public static NodeResponse readNodeResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java new file mode 100644 index 0000000000000..5e8cb306d497d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.reload; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction { + + private final Environment environment; + private final PluginsService pluginsService; + + @Inject + public TransportNodesReloadSecureSettingsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Environment environment, + PluginsService 
pluginService) { + super(settings, NodesReloadSecureSettingsAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, NodesReloadSecureSettingsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, + NodesReloadSecureSettingsResponse.NodeResponse.class); + this.environment = environment; + this.pluginsService = pluginService; + } + + @Override + protected NodesReloadSecureSettingsResponse newResponse(NodesReloadSecureSettingsRequest request, + List responses, + List failures) { + return new NodesReloadSecureSettingsResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected NodeRequest newNodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) { + return new NodeRequest(nodeId, request); + } + + @Override + protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { + return new NodesReloadSecureSettingsResponse.NodeResponse(); + } + + @Override + protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { + final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; + KeyStoreWrapper keystore = null; + try { + // reread keystore from config file + keystore = KeyStoreWrapper.load(environment.configFile()); + if (keystore == null) { + return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), + new IllegalStateException("Keystore is missing")); + } + // decrypt the keystore using the password from the request + keystore.decrypt(request.secureSettingsPassword().toCharArray()); + // add the keystore to the original node settings object + final Settings settingsWithKeystore = Settings.builder() + .put(environment.settings(), false) + .setSecureSettings(keystore) + .build(); + final List exceptions = new ArrayList<>(); + // broadcast the new settings object (with the open embedded keystore) to all reloadable plugins + 
pluginsService.filterPlugins(ReloadablePlugin.class).stream().forEach(p -> { + try { + p.reload(settingsWithKeystore); + } catch (final Exception e) { + exceptions.add(e); + } + }); + ExceptionsHelper.rethrowAndSuppress(exceptions); + return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null); + } catch (final Exception e) { + return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); + } finally { + if (keystore != null) { + keystore.close(); + } + } + } + + public static class NodeRequest extends BaseNodeRequest { + + NodesReloadSecureSettingsRequest request; + + public NodeRequest() { + } + + NodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) { + super(nodeId); + this.request = request; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + request = new NodesReloadSecureSettingsRequest(); + request.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java deleted file mode 100644 index 1ae53f0da1e2f..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reinit/TransportNodesReInitAction.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.reinit; - -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.nodes.BaseNodeRequest; -import org.elasticsearch.action.support.nodes.TransportNodesAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.plugins.ReInitializablePlugin; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.util.List; - -public class TransportNodesReInitAction extends TransportNodesAction { - - private final Environment environment; - private final PluginsService pluginsService; - - @Inject - public TransportNodesReInitAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Environment environment, - PluginsService pluginService) { - super(settings, 
NodesReInitAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - NodesReInitRequest::new, NodeRequest::new, ThreadPool.Names.MANAGEMENT, NodesReInitResponse.NodeResponse.class); - this.environment = environment; - this.pluginsService = pluginService; - } - - @Override - protected NodesReInitResponse newResponse(NodesReInitRequest request, List responses, - List failures) { - return new NodesReInitResponse(clusterService.getClusterName(), responses, failures); - } - - @Override - protected TransportNodesReInitAction.NodeRequest newNodeRequest(String nodeId, NodesReInitRequest request) { - return new NodeRequest(nodeId, request); - } - - @Override - protected NodesReInitResponse.NodeResponse newNodeResponse() { - return new NodesReInitResponse.NodeResponse(); - } - - @Override - protected NodesReInitResponse.NodeResponse nodeOperation(TransportNodesReInitAction.NodeRequest nodeStatsRequest) { - final NodesReInitRequest request = nodeStatsRequest.request; - // open keystore - KeyStoreWrapper keystore = null; - try { - keystore = KeyStoreWrapper.load(environment.configFile()); - keystore.decrypt(new char[0] /* use password from request */); - } catch (GeneralSecurityException | IOException e) { - throw new RuntimeException(e); - } finally { - if (keystore != null) { - keystore.close(); - } - } - - final Settings.Builder builder = Settings.builder().put(environment.settings(), false); - builder.setSecureSettings(keystore); - - final boolean success = pluginsService.filterPlugins(ReInitializablePlugin.class).stream() - .map(p -> p.reinit(builder.build())).allMatch(e -> e == true); - - return new NodesReInitResponse.NodeResponse(clusterService.localNode()); - } - - public static class NodeRequest extends BaseNodeRequest { - - NodesReInitRequest request; - - public NodeRequest() { - } - - NodeRequest(String nodeId, NodesReInitRequest request) { - super(nodeId); - this.request = request; - } - - @Override - public void 
readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = new NodesReInitRequest(); - request.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 147a24976babb..949b0110fff20 100644 --- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -48,7 +49,6 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; @@ -189,7 +189,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { /** * Re initialize each cluster node and pass them the secret store password. 
*/ - NodesReInitRequestBuilder prepareReInit(); + NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings(); /** * Reroutes allocation of shards. Advance API. diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 0c140580a0126..dc70da4e61f7e 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -41,6 +41,8 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; @@ -61,8 +63,6 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; @@ -774,8 +774,8 @@ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { } @Override - 
public NodesReInitRequestBuilder prepareReInit() { - return new NodesReInitRequestBuilder(this, NodesReInitAction.INSTANCE); + public NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings() { + return new NodesReloadSecureSettingsRequestBuilder(this, NodesReloadSecureSettingsAction.INSTANCE); } @Override diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 0ef703448b799..65d47682a95c0 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -74,6 +74,7 @@ *
  • {@link RepositoryPlugin} *
  • {@link ScriptPlugin} *
  • {@link SearchPlugin} + *
  • {@link ReloadablePlugin} * *

    In addition to extension points this class also declares some {@code @Deprecated} {@code public final void onModule} methods. These * methods should cause any extensions of {@linkplain Plugin} that used the pre-5.x style extension syntax to fail to build and point the diff --git a/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java b/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java deleted file mode 100644 index 8295305a97110..0000000000000 --- a/server/src/main/java/org/elasticsearch/plugins/ReInitializablePlugin.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import org.elasticsearch.common.settings.Settings; - -public interface ReInitializablePlugin { - boolean reinit(Settings settings); -} diff --git a/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java b/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java new file mode 100644 index 0000000000000..86d7759185e69 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.common.settings.Settings; + +/** + * An extension point for {@link Plugin}s that can be reloaded. There is no + * clear definition about what reloading a plugin actually means. When a plugin + * is reloaded it might rebuild any internal members. Plugins usually implement + * this interface in order to reread the values of {@code SecureSetting}s and + * then rebuild any dependent internal members. + */ +public interface ReloadablePlugin { + /** + * Called to trigger the rebuild of the plugin's internal members. The reload + * operation is required to have been completed when the method returns. + * Strictly speaking, the settings argument should not be accessed + * outside of this method's call stack, as any values stored in the node's + * keystore (see {@code SecureSetting}) will not otherwise be retrievable. The + * setting values do not follow dynamic updates, i.e. the values are identical + * to the ones during the initial plugin loading, barring the keystore file on + * disk changes. Any failure during the operation should be signaled by raising + * an exception, but the plugin should otherwise continue to function + * unperturbed. 
+ * + * @param settings + * Settings used while reloading the plugin. All values are + * retrievable, including the values stored in the node's keystore. + * The setting values are the initial ones, from when the node has been + * started, i.e. they don't follow dynamic updates. + * @throws Exception + * if the operation failed. The plugin should continue to operate as + * if the offending call didn't happen. + */ + void reload(Settings settings) throws Exception; +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java similarity index 68% rename from server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java rename to server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index c7a5d5d809f47..4533f36dd6cfc 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReInitAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -19,9 +19,7 @@ package org.elasticsearch.rest.action.admin.cluster; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitAction; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitRequest; -import org.elasticsearch.action.admin.cluster.reinit.NodesReInitResponse; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -39,31 +37,32 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; -public final class RestReInitAction extends BaseRestHandler { +public final class RestReloadSecureSettingsAction extends BaseRestHandler { - public RestReInitAction(Settings settings, RestController controller) { + 
public RestReloadSecureSettingsAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(POST, "/_nodes/reinit", this); - controller.registerHandler(POST, "/_nodes/{nodeId}/reinit", this); + controller.registerHandler(POST, "/_nodes/reload_secure_settings", this); + controller.registerHandler(POST, "/_nodes/{nodeId}/reload_secure_settings", this); } @Override public String getName() { - return "nodes_reinit_action"; + return "nodes_reload_action"; } @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - final NodesReInitRequest nodesReInitRequest = new NodesReInitRequest(nodesIds); - nodesReInitRequest.timeout(request.param("timeout")); - nodesReInitRequest.secureStorePassword(request.param("secureStorePassword", "")); - - return channel -> client.admin().cluster().execute(NodesReInitAction.INSTANCE, nodesReInitRequest, - new RestBuilderListener(channel) { - + return channel -> client.admin() + .cluster() + .prepareReloadSecureSettings() + .setTimeout(request.param("timeout")) + .setNodesIds(nodesIds) + .setSecureStorePassword(request.param("secure_settings_password", "")) + .execute(new RestBuilderListener(channel) { @Override - public RestResponse buildResponse(NodesReInitResponse response, XContentBuilder builder) throws Exception { + public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) + throws Exception { builder.startObject(); RestActions.buildNodesHeader(builder, channel.request(), response); builder.field("cluster_name", response.getClusterName().value()); @@ -73,7 +72,6 @@ public RestResponse buildResponse(NodesReInitResponse response, XContentBuilder return new BytesRestResponse(RestStatus.OK, builder); } }); - } @Override From 759405f4221bcef613dc31ecda3b2cff53acb1c5 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits 
Date: Tue, 5 Jun 2018 12:02:49 +0300 Subject: [PATCH 10/21] Odd failed merge fallout --- .../node/reload/NodesReloadSecureSettingsAction.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index 705756e6a628b..ccaeca8702f0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -20,10 +20,9 @@ package org.elasticsearch.action.admin.cluster.node.reload; import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; public class NodesReloadSecureSettingsAction - extends Action { + extends Action { public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; @@ -37,8 +36,4 @@ public NodesReloadSecureSettingsResponse newResponse() { return new NodesReloadSecureSettingsResponse(); } - @Override - public NodesReloadSecureSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new NodesReloadSecureSettingsRequestBuilder(client, this); - } } From b1d2f51b3c3d580107a6b3685916cd5418b9b642 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 5 Jun 2018 12:53:08 +0300 Subject: [PATCH 11/21] rename back AmazonEc2Mock -> AmazonEC2Mock because of checkstyle exceptions --- ...{AmazonEc2Mock.java => AmazonEC2Mock.java} | 6 +-- .../discovery/ec2/AwsEc2ServiceMock.java | 2 +- .../ec2/Ec2DiscoveryPluginTests.java | 46 +++++++++---------- .../discovery/ec2/Ec2DiscoveryTests.java | 20 ++++---- 4 files changed, 37 insertions(+), 37 deletions(-) rename 
plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/{AmazonEc2Mock.java => AmazonEC2Mock.java} (99%) diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java similarity index 99% rename from plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java rename to plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index f18375e583295..aa08447fd208b 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEc2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -519,9 +519,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -public class AmazonEc2Mock implements AmazonEC2 { +public class AmazonEC2Mock implements AmazonEC2 { - private static final Logger logger = ESLoggerFactory.getLogger(AmazonEc2Mock.class.getName()); + private static final Logger logger = ESLoggerFactory.getLogger(AmazonEC2Mock.class.getName()); public static final String PREFIX_PRIVATE_IP = "10.0.0."; public static final String PREFIX_PUBLIC_IP = "8.8.8."; @@ -535,7 +535,7 @@ public class AmazonEc2Mock implements AmazonEC2 { final AWSCredentialsProvider credentials; final ClientConfiguration configuration; - public AmazonEc2Mock(int nodes, List> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) { + public AmazonEC2Mock(int nodes, List> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) { if (tagsList != null) { assert tagsList.size() == nodes; } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java index cbe670561e6b7..0596dd697b2eb 100644 --- 
a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java @@ -41,7 +41,7 @@ public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) @Override AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { - return new AmazonEc2Mock(nodes, tagsList, credentials, configuration); + return new AmazonEC2Mock(nodes, tagsList, credentials, configuration); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 5db5bf84ab936..6001ab56d5042 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -89,7 +89,7 @@ public void testNodeAttributesErrorLenient() throws Exception { public void testDefaultEndpoint() throws IOException { try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { - final String endpoint = ((AmazonEc2Mock) plugin.ec2Service.client().client()).endpoint; + final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint; assertThat(endpoint, nullValue()); } } @@ -97,7 +97,7 @@ public void testDefaultEndpoint() throws IOException { public void testSpecificEndpoint() throws IOException { final Settings settings = Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2.endpoint").build(); try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings)) { - final String endpoint = ((AmazonEc2Mock) plugin.ec2Service.client().client()).endpoint; + final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint; assertThat(endpoint, is("ec2.endpoint")); } } @@ -127,32 +127,32 
@@ public void testClientSettingsReInit() throws IOException { .build(); try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings1)) { try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); - assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); // reload secure settings2 plugin.reload(settings2); // client is not released, it is still using the old settings - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), 
is("ec2_access_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); - assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); } try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2")); - assertThat(((AmazonEc2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); - 
assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); - assertThat(((AmazonEc2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); - assertThat(((AmazonEc2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); } } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index f31295678d8f9..43cc924fadb10 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -114,7 +114,7 @@ public void testDefaultSettings() throws InterruptedException { public void testPrivateIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(AmazonEc2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip") @@ 
-125,7 +125,7 @@ public void testPrivateIp() throws InterruptedException { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) { TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = poorMansDNS.get(AmazonEc2Mock.PREFIX_PRIVATE_IP + node++); + TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++); assertEquals(address, expected); } } @@ -133,7 +133,7 @@ public void testPrivateIp() throws InterruptedException { public void testPublicIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(AmazonEc2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip") @@ -144,7 +144,7 @@ public void testPublicIp() throws InterruptedException { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) { TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = poorMansDNS.get(AmazonEc2Mock.PREFIX_PUBLIC_IP + node++); + TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++); assertEquals(address, expected); } } @@ -153,8 +153,8 @@ public void testPrivateDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { String instanceId = "node" + (i+1); - poorMansDNS.put(AmazonEc2Mock.PREFIX_PRIVATE_DNS + instanceId + - AmazonEc2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + + AmazonEC2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns") @@ -167,7 +167,7 @@ public void testPrivateDns() throws InterruptedException { String instanceId = "node" + node++; TransportAddress 
address = discoveryNode.getAddress(); TransportAddress expected = poorMansDNS.get( - AmazonEc2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEc2Mock.SUFFIX_PRIVATE_DNS); + AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEC2Mock.SUFFIX_PRIVATE_DNS); assertEquals(address, expected); } } @@ -176,8 +176,8 @@ public void testPublicDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { String instanceId = "node" + (i+1); - poorMansDNS.put(AmazonEc2Mock.PREFIX_PUBLIC_DNS + instanceId - + AmazonEc2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + + AmazonEC2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); } Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns") @@ -190,7 +190,7 @@ public void testPublicDns() throws InterruptedException { String instanceId = "node" + node++; TransportAddress address = discoveryNode.getAddress(); TransportAddress expected = poorMansDNS.get( - AmazonEc2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEc2Mock.SUFFIX_PUBLIC_DNS); + AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEC2Mock.SUFFIX_PUBLIC_DNS); assertEquals(address, expected); } } From cd0113bcce8d6d85cdce5830d0a900e0bf04647a Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 5 Jun 2018 14:25:32 +0300 Subject: [PATCH 12/21] One line javadoc fix --- .../org/elasticsearch/discovery/ec2/AmazonEc2Reference.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java index 9462738a539f4..0b0b208790b48 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java @@ -25,7 +25,7 
@@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted; /** - * Handles the shutdown of the wrapped {@link AmazonEC2Client} using reference + * Handles the shutdown of the wrapped {@link AmazonEC2} using reference * counting. */ public class AmazonEc2Reference extends AbstractRefCounted implements Releasable { From 918827c925e2fd4b9a5780111b1fe8e10561616f Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 9 Jun 2018 20:18:31 +0300 Subject: [PATCH 13/21] Fallout from #30652 --- .../repositories/s3/S3Repository.java | 18 +++++++++++------- .../s3/S3BlobStoreRepositoryTests.java | 7 +++++-- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 9f984c4b5e362..063e266837bad 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -188,13 +188,7 @@ class S3Repository extends BlobStoreRepository { // deprecated behavior: override client credentials from the cluster state // (repository settings) if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - deprecationLogger.deprecated("Using s3 access/secret key from repository settings. 
Instead " - + "store these in named clients and the elasticsearch keystore for secure settings."); - final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); - // hack, but that's ok because the whole if branch should be axed - final Map prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY)); - final Map newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials); - awsService.refreshAndClearCache(newSettings); + overrideCredentialsFromClusterState(awsService); } blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); @@ -225,4 +219,14 @@ protected boolean isCompress() { protected ByteSizeValue chunkSize() { return chunkSize; } + + void overrideCredentialsFromClusterState(AwsS3Service awsService) { + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); + // hack, but that's ok because the whole if branch should be axed + final Map prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY)); + final Map newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials); + awsService.refreshAndClearCache(newSettings); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 9a340867bba91..2843390f1aa80 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ 
-115,8 +115,11 @@ public Map getRepositories(final Environment env, fi public synchronized AmazonS3Reference client(String clientName) { return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass)); } - })); - + }) { + @Override + void overrideCredentialsFromClusterState(AwsS3Service awsService) { + } + }); } } From 62debd194b99a445b5d3e651ee0d736b342ba6a9 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 16 Jun 2018 20:53:04 +0300 Subject: [PATCH 14/21] [Refactoring] Ec2 and GCS plugins build client lazily (#31250) --- .../discovery/ec2/AwsEc2Service.java | 3 +- .../discovery/ec2/AwsEc2ServiceImpl.java | 60 ++++------ .../azure/AzureStorageServiceMock.java | 2 +- .../gcs/GoogleCloudStorageService.java | 60 +++++----- .../common/util/LazyInitializable.java | 108 ++++++++++++++++++ 5 files changed, 161 insertions(+), 72 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java index c001e35ad1ee6..976f1db26d173 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java @@ -93,8 +93,7 @@ class HostType { * client. 
* * @param clientSettings the new refreshed settings - * @return the old stale settings */ - Ec2ClientSettings refreshAndClearCache(Ec2ClientSettings clientSettings); + void refreshAndClearCache(Ec2ClientSettings clientSettings); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index b6c0a127096c4..67902174630ea 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -19,8 +19,8 @@ package org.elasticsearch.discovery.ec2; -import java.io.IOException; import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentialsProvider; @@ -32,16 +32,19 @@ import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.LazyInitializable; class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; - private volatile AmazonEc2Reference clientReference; - private volatile Ec2ClientSettings clientSettings; + private final AtomicReference> lazyClientReference = + new AtomicReference<>(); AwsEc2ServiceImpl(Settings settings) { super(settings); @@ -108,52 +111,35 @@ static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings @Override public AmazonEc2Reference client() { - if ((clientReference != null) && 
clientReference.tryIncRef()) { - return clientReference; - } - synchronized (this) { - if ((clientReference != null) && clientReference.tryIncRef()) { - return clientReference; - } - if (clientSettings == null) { - throw new IllegalArgumentException("Missing ec2 client configs."); - } - final AmazonEc2Reference clientReference = new AmazonEc2Reference(buildClient(clientSettings)); - clientReference.incRef(); - this.clientReference = clientReference; - return clientReference; + final LazyInitializable clientReference = this.lazyClientReference.get(); + if (clientReference == null) { + throw new IllegalStateException("Missing ec2 client configs"); } + return clientReference.getOrCompute(); } - /** - * Refreshes the settings for the AmazonEC2 client. New clients will be build - * using these new settings. Old client is usable until released. On release it + * Refreshes the settings for the AmazonEC2 client. The new client will be build + * using these new settings. The old client is usable until released. On release it * will be destroyed instead of being returned to the cache. 
*/ @Override - public synchronized Ec2ClientSettings refreshAndClearCache(Ec2ClientSettings clientSettings) { - // shutdown all unused clients - // others will shutdown on their respective release - releaseCachedClient(); - final Ec2ClientSettings prevSettings = this.clientSettings; - this.clientSettings = clientSettings; - return prevSettings; + public void refreshAndClearCache(Ec2ClientSettings clientSettings) { + final LazyInitializable newClient = new LazyInitializable<>( + () -> new AmazonEc2Reference(buildClient(clientSettings)), clientReference -> clientReference.incRef(), + clientReference -> clientReference.decRef()); + final LazyInitializable oldClient = this.lazyClientReference.getAndSet(newClient); + if (oldClient != null) { + oldClient.reset(); + } } @Override public void close() { - releaseCachedClient(); - } - - private synchronized void releaseCachedClient() { - if (this.clientReference == null) { - return; + final LazyInitializable clientReference = this.lazyClientReference.getAndSet(null); + if (clientReference != null) { + clientReference.reset(); } - // the client will shutdown when it will not be used anymore - this.clientReference.decRef(); - // clear the cached client, it will be build lazily - this.clientReference = null; // shutdown IdleConnectionReaper background thread // it will be restarted on new client usage IdleConnectionReaper.shutdown(); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index bb6afee8f7b09..a680af06fc655 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -115,7 +115,7 @@ public Map listBlobsByPrefix(String account, String contai @Override public void 
writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) - throws URISyntaxException, StorageException, FileAlreadyExistsExceeption { + throws URISyntaxException, StorageException, FileAlreadyExistsException { if (blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 9fe78dfb9970b..b24674da174c3 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -34,21 +34,25 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.LazyInitializable; + import java.io.IOException; import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; public class GoogleCloudStorageService extends AbstractComponent { - /** Clients settings identified by client name. */ - private volatile Map clientsSettings = emptyMap(); - /** Cache of client instances. Client instances are built once for each setting change. */ - private volatile Map clientsCache = emptyMap(); + /** + * Dictionary of client instances. Client instances are built lazily from the + * latest settings. 
+ */ + private final AtomicReference>> clientsCache = new AtomicReference<>(emptyMap()); public GoogleCloudStorageService(final Settings settings) { super(settings); @@ -57,18 +61,21 @@ public GoogleCloudStorageService(final Settings settings) { /** * Refreshes the client settings and clears the client cache. Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed - * using these passed settings. + * using the parameter settings. * * @param clientsSettings the new settings used for building clients for subsequent requests - * @return previous settings which have been substituted */ - public synchronized Map - refreshAndClearCache(Map clientsSettings) { - final Map prevSettings = this.clientsSettings; - this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); - this.clientsCache = emptyMap(); - // clients are built lazily by {@link client(String)} - return prevSettings; + public synchronized void refreshAndClearCache(Map clientsSettings) { + // build the new lazy clients + final MapBuilder> newClientsCache = MapBuilder.newMapBuilder(); + for (final Map.Entry entry : clientsSettings.entrySet()) { + newClientsCache.put(entry.getKey(), + new LazyInitializable(() -> createClient(entry.getKey(), entry.getValue()))); + } + // make the new clients available + final Map> oldClientCache = clientsCache.getAndSet(newClientsCache.immutableMap()); + // release old clients + oldClientCache.values().forEach(LazyInitializable::reset); } /** @@ -83,37 +90,26 @@ public GoogleCloudStorageService(final Settings settings) { * (blobs) */ public Storage client(final String clientName) throws IOException { - Storage storage = clientsCache.get(clientName); - if (storage != null) { - return storage; - } - synchronized (this) { - storage = clientsCache.get(clientName); - if (storage != null) { - return storage; - } - storage = SocketAccess.doPrivilegedIOException(() -> createClient(clientName)); - clientsCache = 
MapBuilder.newMapBuilder(clientsCache).put(clientName, storage).immutableMap(); - return storage; + final LazyInitializable lazyClient = clientsCache.get().get(clientName); + if (lazyClient == null) { + throw new IllegalArgumentException("Unknown client name [" + clientName + "]. Existing client configs: " + + Strings.collectionToDelimitedString(clientsCache.get().keySet(), ",")); } + return lazyClient.getOrCompute(); } /** * Creates a client that can be used to manage Google Cloud Storage objects. The client is thread-safe. * * @param clientName name of client settings to use, including secure settings + * @param clientSettings name of client settings to use, including secure settings * @return a new client storage instance that can be used to manage objects * (blobs) */ - private Storage createClient(final String clientName) throws Exception { - final GoogleCloudStorageClientSettings clientSettings = clientsSettings.get(clientName); - if (clientSettings == null) { - throw new IllegalArgumentException("Unknown client name [" + clientName + "]. 
Existing client configs: " - + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); - } + private Storage createClient(final String clientName, final GoogleCloudStorageClientSettings clientSettings) throws IOException { logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, clientSettings.getHost())); - final HttpTransport httpTransport = createHttpTransport(clientSettings.getHost()); + final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> createHttpTransport(clientSettings.getHost())); final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder() .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) diff --git a/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java new file mode 100644 index 0000000000000..6675dccf2c3d2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.util; + +import org.elasticsearch.common.CheckedSupplier; + +import java.util.Objects; +import java.util.function.Consumer; + +/** + * Encapsulates a {@link CheckedSupplier} which is lazily invoked once on the + * first call to {@code #getOrCompute()}. The value which the + * supplier returns is memorized and will be served until + * {@code #reset()} is called. Each value returned by {@code #getOrCompute()}, + * newly minted or cached, will be passed to the onGet + * {@link Consumer}. On {@code #reset()} the value will be passed to the + * onReset {@code Consumer} and the next {@code #getOrCompute()} + * will regenerate the value. + */ +public final class LazyInitializable { + + private final CheckedSupplier supplier; + private final Consumer onGet; + private final Consumer onReset; + private volatile T value; + + /** + * Creates the simple LazyInitializable instance. + * + * @param supplier + * The {@code CheckedSupplier} to generate values which will be + * served on {@code #getOrCompute()} invocations. + */ + public LazyInitializable(CheckedSupplier supplier) { + this(supplier, v -> {}, v -> {}); + } + + /** + * Creates the complete LazyInitializable instance. + * + * @param supplier + * The {@code CheckedSupplier} to generate values which will be + * served on {@code #getOrCompute()} invocations. + * @param onGet + * A {@code Consumer} which is called on each value, newly forged or + * stale, that is returned by {@code #getOrCompute()} + * @param onReset + * A {@code Consumer} which is invoked on the value that will be + * erased when calling {@code #reset()} + */ + public LazyInitializable(CheckedSupplier supplier, Consumer primer, Consumer finalizer) { + this.supplier = supplier; + this.onGet = primer; + this.onReset = finalizer; + } + + /** + * Returns a value that was created by supplier. The value might + * have been previously created, if not it will be created now, thread safe of + * course. 
+ */ + public T getOrCompute() throws E { + final T readOnce = value; // Read volatile just once... + final T result = readOnce == null ? maybeCompute(supplier) : readOnce; + onGet.accept(result); + return result; + } + + /** + * Clears the value, if it has been previously created by calling + * {@code #getOrCompute()}. The onReset will be called on this + * value. The next call to {@code #getOrCompute()} will recreate the value. + */ + public synchronized void reset() { + if (value != null) { + onReset.accept(value); + value = null; + } + } + + /** + * Creates a new value thread safely. + */ + private synchronized T maybeCompute(CheckedSupplier supplier) throws E { + if (value == null) { + value = Objects.requireNonNull(supplier.get()); + } + return value; + } + +} From 80f6d9eedd3b3fef087b48b92d7510abf6ca670d Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 16 Jun 2018 21:12:30 +0300 Subject: [PATCH 15/21] [TEST] Reload secure settings transport IT (#31180) --- ...nsportNodesReloadSecureSettingsAction.java | 4 + .../common/settings/KeyStoreWrapper.java | 4 +- .../RestReloadSecureSettingsAction.java | 22 +- .../action/admin/ReloadSecureSettingsIT.java | 418 ++++++++++++++++++ .../action/admin/invalid.txt.keystore | 3 + 5 files changed, 430 insertions(+), 21 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java create mode 100644 server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 5e8cb306d497d..08b98f9219f37 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.cluster.node.reload; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -102,6 +104,8 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque try { p.reload(settingsWithKeystore); } catch (final Exception e) { + logger.warn((Supplier) () -> new ParameterizedMessage("Reload failed for plugin [{}]", p.getClass().getSimpleName()), + e); exceptions.add(e); } }); diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index f47760491f8d5..3a8a06949b29c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -308,7 +308,9 @@ public void decrypt(char[] password) throws GeneralSecurityException, IOExceptio } if (formatVersion <= 2) { decryptLegacyEntries(); - assert password.length == 0; + if (password.length != 0) { + throw new IllegalArgumentException("Keystore format does not accept non-empty passwords"); + } return; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 4533f36dd6cfc..8a73f0db339df 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -19,19 +19,13 @@ package 
org.elasticsearch.rest.action.admin.cluster; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; import java.io.IOException; @@ -59,19 +53,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client .setTimeout(request.param("timeout")) .setNodesIds(nodesIds) .setSecureStorePassword(request.param("secure_settings_password", "")) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) - throws Exception { - builder.startObject(); - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); - builder.endObject(); - - return new BytesRestResponse(RestStatus.OK, builder); - } - }); + .execute(new NodesResponseRestListener<>(channel)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java new file mode 100644 index 0000000000000..db3bf14c66347 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -0,0 +1,418 @@ +/* + * Licensed to Elasticsearch 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; +import java.security.AccessControlException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static 
org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.containsString; + +public class ReloadSecureSettingsIT extends ESIntegTestCase { + + public void testMissingKeystoreFile() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + // keystore file should be missing for this test case + Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); + assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the missing 
keystore case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testNullKeystorePassword() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + reloadSettingsError.set(new AssertionError("Null keystore password should fail")); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + try { + assertThat(e, instanceOf(ActionRequestValidationException.class)); + assertThat(e.getMessage(), containsString("secure settings password cannot be null")); + } catch (final AssertionError ae) { + reloadSettingsError.set(ae); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the null password case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testInvalidKeystoreFile() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int 
initialReloadCount = mockReloadablePlugin.getReloadCount(); + // invalid "keystore" file should be present in the config dir + try (InputStream keystore = ReloadSecureSettingsIT.class.getResourceAsStream("invalid.txt.keystore")) { + if (Files.exists(environment.configFile()) == false) { + Files.createDirectory(environment.configFile()); + } + Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); + } + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the invalid keystore format case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testWrongKeystorePassword() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final 
AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // "some" keystore should be present in this case + writeEmptyKeystore(environment, new char[0]); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("Wrong password here").execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IOException.class)); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the wrong password case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testMisbehavingPlugin() throws Exception { + final Environment environment = internalCluster().getInstance(Environment.class); + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + // make plugins throw on reload + for (final String nodeName : internalCluster().getNodeNames()) { + internalCluster().getInstance(PluginsService.class, 
nodeName) + .filterPlugins(MisbehavingReloadablePlugin.class) + .stream().findFirst().get().setShouldThrow(true); + } + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // "some" keystore should be present + final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]); + // read seed setting value from the test case (not from the node) + final String seedValue = KeyStoreWrapper.SEED_SETTING + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // even if one plugin fails to reload (throws Exception), others should be + // unperturbed + assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(1)); + // mock plugin should have been reloaded successfully + assertThat(mockReloadablePlugin.getSeedValue(), 
equalTo(seedValue)); + } + + public void testReloadWhileKeystoreChanged() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + for (int i = 0; i < randomIntBetween(4, 8); i++) { + // write keystore + final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]); + // read seed setting value from the test case (not from the node) + final String seedValue = KeyStoreWrapper.SEED_SETTING + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); + // reload call + successfulReloadCall(); + assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue)); + assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(i + 1)); + } + } + + @Override + protected Collection> nodePlugins() { + final List> plugins = Arrays.asList(MockReloadablePlugin.class, MisbehavingReloadablePlugin.class); + // shuffle as reload is called in order + Collections.shuffle(plugins, random()); + return plugins; + } + + private void successfulReloadCall() throws InterruptedException { + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse 
nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } + + private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + try { + keyStoreWrapper.save(environment.configFile(), password); + } catch (final AccessControlException e) { + if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { + // this is expected: the save method is extra diligent and wants to make sure + // the keystore is readable, not relying on umask and whatnot. It's ok, we don't + // care about this in tests. 
+ } else { + throw e; + } + } + return keyStoreWrapper; + } + + public static class CountingReloadablePlugin extends Plugin implements ReloadablePlugin { + + private volatile int reloadCount; + + public CountingReloadablePlugin() { + } + + @Override + public void reload(Settings settings) throws Exception { + reloadCount++; + } + + public int getReloadCount() { + return reloadCount; + } + + } + + public static class MockReloadablePlugin extends CountingReloadablePlugin { + + private volatile String seedValue; + + public MockReloadablePlugin() { + } + + @Override + public void reload(Settings settings) throws Exception { + super.reload(settings); + this.seedValue = KeyStoreWrapper.SEED_SETTING.get(settings).toString(); + } + + public String getSeedValue() { + return seedValue; + } + + } + + public static class MisbehavingReloadablePlugin extends CountingReloadablePlugin { + + private boolean shouldThrow = false; + + public MisbehavingReloadablePlugin() { + } + + @Override + public synchronized void reload(Settings settings) throws Exception { + super.reload(settings); + if (shouldThrow) { + shouldThrow = false; + throw new Exception("If shouldThrow I throw"); + } + } + + public synchronized void setShouldThrow(boolean shouldThrow) { + this.shouldThrow = shouldThrow; + } + } + +} diff --git a/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore b/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore new file mode 100644 index 0000000000000..04613ffab7f36 --- /dev/null +++ b/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore @@ -0,0 +1,3 @@ +admin admin +dragon 12345 + From d9835d5db4e87a5197977adb6131fcbcb49ac1a3 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 16 Jun 2018 22:26:50 +0300 Subject: [PATCH 16/21] Nodes reload request store password as SecureString (#31261) --- .../NodesReloadSecureSettingsRequest.java | 85 +++++++++++++++++-- 
...desReloadSecureSettingsRequestBuilder.java | 46 +++++++++- ...nsportNodesReloadSecureSettingsAction.java | 5 +- .../RestReloadSecureSettingsAction.java | 33 +++++-- 4 files changed, 156 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e3a9229893ed2..50df7b1bb26e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -19,11 +19,18 @@ package org.elasticsearch.action.admin.cluster.node.reload; + import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; + import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -38,7 +45,7 @@ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { + public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password"; + public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) { super(client, action, new NodesReloadSecureSettingsRequest()); } - public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(String secureStorePassword) { + public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { request.secureStorePassword(secureStorePassword); return this; } + public 
NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { + Objects.requireNonNull(xContentType); + // EMPTY is ok here because we never call namedObject + try (final InputStream stream = source.streamInput(); + final XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser.Token token; + token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("expected an object, but found token [{}]", token); + } + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME || false == SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) { + throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME, + token); + } + token = parser.nextToken(); + if (token != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", + SECURE_SETTINGS_PASSWORD_FIELD_NAME, token); + } + final String password = parser.text(); + setSecureStorePassword(new SecureString(password.toCharArray())); + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("expected end of object, but found token [{}]", token); + } + } + return this; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 08b98f9219f37..c031f6d64f62a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; @@ -84,7 +85,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; KeyStoreWrapper keystore = null; - try { + try (final SecureString secureSettingsPassword = request.secureSettingsPassword()) { // reread keystore from config file keystore = KeyStoreWrapper.load(environment.configFile()); if (keystore == null) { @@ -92,7 +93,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque new IllegalStateException("Keystore is missing")); } // decrypt the keystore using the password from the request - keystore.decrypt(request.secureSettingsPassword().toCharArray()); + keystore.decrypt(secureSettingsPassword.getChars()); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder() .put(environment.settings(), false) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 8a73f0db339df..0697871ea5d1c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -19,13 
+19,21 @@ package org.elasticsearch.rest.action.admin.cluster; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.rest.action.RestBuilderListener; import java.io.IOException; @@ -47,13 +55,28 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - return channel -> client.admin() + final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin() .cluster() .prepareReloadSecureSettings() .setTimeout(request.param("timeout")) - .setNodesIds(nodesIds) - .setSecureStorePassword(request.param("secure_settings_password", "")) - .execute(new NodesResponseRestListener<>(channel)); + .source(request.requiredContent(), request.getXContentType()) + .setNodesIds(nodesIds); + final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request(); + return channel -> nodesRequestBuilder + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, 
XContentBuilder builder) + throws Exception { + builder.startObject(); + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + builder.endObject(); + // clear password for the original request + nodesRequest.secureSettingsPassword().close(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); } @Override From 50313303b23c383c485b0747007c2fa95588292e Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 16 Jun 2018 23:35:42 +0300 Subject: [PATCH 17/21] Checkstyle: Redundant 'final' modifier --- .../node/reload/NodesReloadSecureSettingsRequestBuilder.java | 4 ++-- .../node/reload/TransportNodesReloadSecureSettingsAction.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index a082c0373c25c..b5f2f73e56f51 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -53,8 +53,8 @@ public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureStri public NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { Objects.requireNonNull(xContentType); // EMPTY is ok here because we never call namedObject - try (final InputStream stream = source.streamInput(); - final XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, 
LoggingDeprecationHandler.INSTANCE, stream)) { XContentParser.Token token; token = parser.nextToken(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index c031f6d64f62a..f5dc8b84727c2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -85,7 +85,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; KeyStoreWrapper keystore = null; - try (final SecureString secureSettingsPassword = request.secureSettingsPassword()) { + try (SecureString secureSettingsPassword = request.secureSettingsPassword()) { // reread keystore from config file keystore = KeyStoreWrapper.load(environment.configFile()); if (keystore == null) { From e1a69dcedfa3c3ede27fbe65c35d303dd67dbb17 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 16 Jun 2018 23:55:52 +0300 Subject: [PATCH 18/21] Fallout from #31261 in tests --- .../action/admin/ReloadSecureSettingsIT.java | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index db3bf14c66347..2061349e3301d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -24,6 +24,7 @@ import 
org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -62,7 +63,7 @@ public void testMissingKeystoreFile() throws Exception { Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -149,7 +150,7 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -191,8 +192,11 @@ public void testWrongKeystorePassword() throws Exception { // "some" keystore should be present in this case writeEmptyKeystore(environment, new char[0]); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("Wrong password here").execute( - new ActionListener() { + client().admin() + .cluster() + 
.prepareReloadSecureSettings() + .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) + .execute(new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { try { @@ -244,7 +248,7 @@ public void testMisbehavingPlugin() throws Exception { .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) .toString(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -311,7 +315,7 @@ protected Collection> nodePlugins() { private void successfulReloadCall() throws InterruptedException { final AtomicReference reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword("").execute( + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { From 385d7862516f313ac22239ae583e87000bbd2dee Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sun, 17 Jun 2018 00:25:29 +0300 Subject: [PATCH 19/21] Fallout from javadoc rename --- .../org/elasticsearch/common/util/LazyInitializable.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java index 6675dccf2c3d2..ad3a3bcc299d0 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java +++ 
b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java @@ -65,10 +65,10 @@ public LazyInitializable(CheckedSupplier supplier) { * A {@code Consumer} which is invoked on the value that will be * erased when calling {@code #reset()} */ - public LazyInitializable(CheckedSupplier supplier, Consumer primer, Consumer finalizer) { + public LazyInitializable(CheckedSupplier supplier, Consumer onGet, Consumer onReset) { this.supplier = supplier; - this.onGet = primer; - this.onReset = finalizer; + this.onGet = onGet; + this.onReset = onReset; } /** From e27f0979b0967c9861c01ed40acd08f40e9c8733 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sun, 17 Jun 2018 09:25:02 +0300 Subject: [PATCH 20/21] Preserve original behavior, azure repo plugin Allow plugin install even if no azure settings are present atm --- .../repositories/azure/AzureRepositoryPlugin.java | 4 ++++ .../repositories/azure/AzureStorageSettings.java | 7 ++----- .../repositories/azure/AzureStorageServiceTests.java | 5 ----- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 1c53422e1902b..f2702b139a69d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -68,6 +69,9 @@ public List> getSettings() { public void reload(Settings settings) { // secure settings should be 
readable final Map clientsSettings = AzureStorageSettings.load(settings); + if (clientsSettings.isEmpty()) { + throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); + } azureStoreService.refreshAndClearCache(clientsSettings); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 42676f56bf70c..c4e4c1439e45f 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -195,16 +195,13 @@ public static Map load(Settings settings) { for (final String clientName : ACCOUNT_SETTING.getNamespaces(settings)) { storageSettings.put(clientName, getClientSettings(settings, clientName)); } - if (storageSettings.isEmpty()) { - throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); - } - if (storageSettings.containsKey("default") == false) { + if (false == storageSettings.containsKey("default") && false == storageSettings.isEmpty()) { // in case no setting named "default" has been set, let's define our "default" // as the first named config we get final AzureStorageSettings defaultSettings = storageSettings.values().iterator().next(); storageSettings.put("default", defaultSettings); } - assert storageSettings.containsKey("default") : "always have 'default'"; + assert storageSettings.containsKey("default") || storageSettings.isEmpty() : "always have 'default' if any"; return Collections.unmodifiableMap(storageSettings); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 3dc943df1c2db..3308db682fece 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -153,11 +153,6 @@ public void testReinitClientWrongSettings() throws IOException { } } - public void testGetSelectedClientWithNoPrimaryAndSecondary() { - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(Settings.EMPTY)); - assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); - } - public void testGetSelectedClientNonExisting() { final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); From 56f741e16ac20c249baabbe2253b45470f0a4aa3 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sun, 17 Jun 2018 19:17:41 +0300 Subject: [PATCH 21/21] Do not clear keystore password on node requests --- .../reload/TransportNodesReloadSecureSettingsAction.java | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index f5dc8b84727c2..cb870e58d3187 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -84,10 +84,9 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { 
@Override protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; - KeyStoreWrapper keystore = null; - try (SecureString secureSettingsPassword = request.secureSettingsPassword()) { + final SecureString secureSettingsPassword = request.secureSettingsPassword(); + try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file - keystore = KeyStoreWrapper.load(environment.configFile()); if (keystore == null) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), new IllegalStateException("Keystore is missing")); @@ -114,10 +113,6 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null); } catch (final Exception e) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); - } finally { - if (keystore != null) { - keystore.close(); - } } }