diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java new file mode 100644 index 0000000000000..88456f8dcc095 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -0,0 +1,303 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + +public class IndexLifecycleClient { + private final RestHighLevelClient restHighLevelClient; + + IndexLifecycleClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Retrieve one or more lifecycle policy definition + * See + * the docs for more. 
+ * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetLifecyclePolicyResponse getLifecyclePolicy(GetLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + GetLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retrieve one or more lifecycle policy definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + GetLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putLifecyclePolicy(PutLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putLifecyclePolicyAsync(PutLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Delete a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse deleteLifecyclePolicy(DeleteLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously delete a lifecycle definition + * See + * the docs for more. 
+ * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteLifecyclePolicyAsync(DeleteLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RemoveIndexLifecyclePolicyResponse removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, + options, RemoveIndexLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void removeIndexLifecyclePolicyAsync(RemoveIndexLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, options, + RemoveIndexLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse startILM(StartILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void startILMAsync(StartILMRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Stop the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse stopILM(StopILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Get the status of index lifecycle management + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public LifecycleManagementStatusResponse lifecycleManagementStatus(LifecycleManagementStatusRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, + options, LifecycleManagementStatusResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get the status of index lifecycle management + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void lifecycleManagementStatusAsync(LifecycleManagementStatusRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, options, + LifecycleManagementStatusResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously stop the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void stopILMAsync(StopILMRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Explain the lifecycle state for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ExplainLifecycleResponse explainLifecycle(ExplainLifecycleRequest request,RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, + ExplainLifecycleResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously explain the lifecycle state for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void explainLifecycleAsync(ExplainLifecycleRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, + ExplainLifecycleResponse::fromXContent, listener, emptySet()); + } + + /** + * Retry lifecycle step for given indices + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse retryLifecycleStep(RetryLifecyclePolicyRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retry the lifecycle step for given indices + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void retryLifecycleStepAsync(RetryLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java new file mode 100644 index 0000000000000..5e185866f8a89 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.Strings; + +import java.io.IOException; + +final class IndexLifecycleRequestConverters { + + private IndexLifecycleRequestConverters() {} + + static Request getLifecyclePolicy(GetLifecyclePolicyRequest getLifecyclePolicyRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_ilm/policy") + .addCommaSeparatedPathParts(getLifecyclePolicyRequest.getPolicyNames()).build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(getLifecyclePolicyRequest.masterNodeTimeout()); + 
params.withTimeout(getLifecyclePolicyRequest.timeout()); + return request; + } + + static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + .addPathPartAsIs(putLifecycleRequest.getName()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); + params.withTimeout(putLifecycleRequest.timeout()); + request.setEntity(RequestConverters.createEntity(putLifecycleRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecyclePolicyRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(deleteLifecyclePolicyRequest.timeout()); + return request; + } + + static Request removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest removePolicyRequest) { + String[] indices = removePolicyRequest.indices() == null ? 
+ Strings.EMPTY_ARRAY : removePolicyRequest.indices().toArray(new String[] {}); + Request request = new Request(HttpDelete.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(removePolicyRequest.indicesOptions()); + params.withMasterTimeout(removePolicyRequest.masterNodeTimeout()); + return request; + } + + static Request startILM(StartILMRequest startILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("start") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(startILMRequest.masterNodeTimeout()); + params.withTimeout(startILMRequest.timeout()); + return request; + } + + static Request stopILM(StopILMRequest stopILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("stop") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); + params.withTimeout(stopILMRequest.timeout()); + return request; + } + + static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecycleManagementStatusRequest){ + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("status") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); + params.withTimeout(lifecycleManagementStatusRequest.timeout()); + return request; + } + + static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) { + String[] indices = 
explainLifecycleRequest.indices() == null ? Strings.EMPTY_ARRAY : explainLifecycleRequest.indices(); + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("explain") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); + params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); + return request; + } + + static Request retryLifecycle(RetryLifecyclePolicyRequest retryLifecyclePolicyRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(retryLifecyclePolicyRequest.getIndices()) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("retry") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(retryLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(retryLifecyclePolicyRequest.timeout()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index aae2cfccf521a..ec19907e9d3ab 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -84,6 +84,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.util.List; import java.util.Locale; import java.util.StringJoiner; @@ -1022,7 +1023,12 @@ EndpointBuilder addCommaSeparatedPathParts(String[] parts) { return this; } - EndpointBuilder addPathPartAsIs(String... 
parts) { + EndpointBuilder addCommaSeparatedPathParts(List<String> parts) { + addPathPart(String.join(",", parts)); + return this; + } + + EndpointBuilder addPathPartAsIs(String... parts) { for (String part : parts) { if (Strings.hasLength(part)) { joiner.add(part); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 31b633717f48c..45ba0f8b46028 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -219,6 +219,7 @@ public class RestHighLevelClient implements Closeable { private final WatcherClient watcherClient = new WatcherClient(this); private final GraphClient graphClient = new GraphClient(this); private final LicenseClient licenseClient = new LicenseClient(this); + private final IndexLifecycleClient indexLifecycleClient = new IndexLifecycleClient(this); private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); private final SecurityClient securityClient = new SecurityClient(this); @@ -368,6 +369,16 @@ public final XPackClient xpack() { */ public LicenseClient license() { return licenseClient; } + /** + * Provides methods for accessing the Elastic Licensed Index Lifecycle APIs that are shipped with the default distribution of + * Elasticsearch. All of these APIs will 404 if run against the OSS distribution of Elasticsearch. + *

+ * See the X-Pack APIs on elastic.co for more information. + */ + public IndexLifecycleClient indexLifecycle() { + return indexLifecycleClient; + } + /** * Provides methods for accessing the Elastic Licensed Licensing APIs that * are shipped with the default distribution of Elasticsearch. All of diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java index c26a7ba48ca17..60ecea39ae093 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java @@ -28,7 +28,7 @@ * Please note, any requests that use a ackTimeout should set timeout as they * represent the same backing field on the server. */ -public class TimedRequest implements Validatable { +public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index a20dfd1ba328a..1e8a5328355a0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -94,4 +94,5 @@ public void usageAsync(XPackUsageRequest request, RequestOptions options, Action restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::usage, options, XPackUsageResponse::fromXContent, listener, emptySet()); } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java new file mode 100644 index 
0000000000000..702db15b965c7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class AllocateAction implements LifecycleAction, ToXContentObject { + + public static final String NAME = "allocate"; + static final ParseField NUMBER_OF_REPLICAS_FIELD = new ParseField("number_of_replicas"); + static final ParseField INCLUDE_FIELD = new ParseField("include"); + static final ParseField EXCLUDE_FIELD = new ParseField("exclude"); + static final ParseField REQUIRE_FIELD = new ParseField("require"); + + @SuppressWarnings("unchecked") + private static 
final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new AllocateAction((Integer) a[0], (Map) a[1], (Map) a[2], (Map) a[3])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_REPLICAS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), INCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), EXCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), REQUIRE_FIELD); + } + + private final Integer numberOfReplicas; + private final Map include; + private final Map exclude; + private final Map require; + + public static AllocateAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public AllocateAction(Integer numberOfReplicas, Map include, Map exclude, Map require) { + if (include == null) { + this.include = Collections.emptyMap(); + } else { + this.include = include; + } + if (exclude == null) { + this.exclude = Collections.emptyMap(); + } else { + this.exclude = exclude; + } + if (require == null) { + this.require = Collections.emptyMap(); + } else { + this.require = require; + } + if (this.include.isEmpty() && this.exclude.isEmpty() && this.require.isEmpty() && numberOfReplicas == null) { + throw new IllegalArgumentException( + "At least one of " + INCLUDE_FIELD.getPreferredName() + ", " + EXCLUDE_FIELD.getPreferredName() + " or " + + REQUIRE_FIELD.getPreferredName() + "must contain attributes for action " + NAME); + } + if (numberOfReplicas != null && numberOfReplicas < 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0"); + } + this.numberOfReplicas = numberOfReplicas; + } + + public Integer getNumberOfReplicas() { + return numberOfReplicas; + } + + public Map getInclude() { + return include; + } + + public Map getExclude() { + return 
exclude; + } + + public Map getRequire() { + return require; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (numberOfReplicas != null) { + builder.field(NUMBER_OF_REPLICAS_FIELD.getPreferredName(), numberOfReplicas); + } + builder.field(INCLUDE_FIELD.getPreferredName(), include); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); + builder.field(REQUIRE_FIELD.getPreferredName(), require); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(numberOfReplicas, include, exclude, require); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + AllocateAction other = (AllocateAction) obj; + return Objects.equals(numberOfReplicas, other.numberOfReplicas) && + Objects.equals(include, other.include) && + Objects.equals(exclude, other.exclude) && + Objects.equals(require, other.require); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java new file mode 100644 index 0000000000000..299b0ac582771 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class DeleteAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "delete"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + + public static DeleteAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public DeleteAction() { + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java new 
file mode 100644 index 0000000000000..fc029f37ac928 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Objects; + +public class DeleteLifecyclePolicyRequest extends TimedRequest { + + private final String lifecyclePolicy; + + public DeleteLifecyclePolicyRequest(String lifecyclePolicy) { + if (Strings.isNullOrEmpty(lifecyclePolicy)) { + throw new IllegalArgumentException("lifecycle name must be present"); + } + this.lifecyclePolicy = lifecyclePolicy; + } + + public String getLifecyclePolicy() { + return lifecyclePolicy; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DeleteLifecyclePolicyRequest that = (DeleteLifecyclePolicyRequest) o; + return Objects.equals(getLifecyclePolicy(), that.getLifecyclePolicy()); + } + + @Override + public int hashCode() { + return Objects.hash(getLifecyclePolicy()); + } +} diff 
--git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java new file mode 100644 index 0000000000000..9d9e80bf1eeee --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; +import java.util.Objects; +import java.util.Optional; + +/** + * The request object used by the Explain Lifecycle API. 
+ * + * Multiple indices may be queried in the same request using the + * {@link #indices(String...)} method + */ +public class ExplainLifecycleRequest extends TimedRequest { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public ExplainLifecycleRequest() { + super(); + } + + public ExplainLifecycleRequest indices(String... indices) { + this.indices = indices; + return this; + } + + public String[] indices() { + return indices; + } + + public ExplainLifecycleRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public Optional validate() { + return Optional.empty(); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; + return Objects.deepEquals(indices(), other.indices()) && + Objects.equals(indicesOptions(), other.indicesOptions()); + } + + @Override + public String toString() { + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java new file mode 100644 index 0000000000000..de2803afe5415 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The response object returned by the Explain Lifecycle API. + * + * Since the API can be run over multiple indices the response provides a map of + * index to the explanation of the lifecycle status for that index. 
+ */ +public class ExplainLifecycleResponse implements ToXContentObject { + + private static final ParseField INDICES_FIELD = new ParseField("indices"); + + private Map indexResponses; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", a -> new ExplainLifecycleResponse(((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + INDICES_FIELD); + } + + public static ExplainLifecycleResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ExplainLifecycleResponse(Map indexResponses) { + this.indexResponses = indexResponses; + } + + /** + * @return a map of the responses from each requested index. The maps key is + * the index name and the value is the + * {@link IndexLifecycleExplainResponse} describing the current + * lifecycle status of that index + */ + public Map getIndexResponses() { + return indexResponses; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(INDICES_FIELD.getPreferredName()); + for (IndexLifecycleExplainResponse indexResponse : indexResponses.values()) { + builder.field(indexResponse.getIndex(), indexResponse); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(indexResponses); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleResponse other = (ExplainLifecycleResponse) obj; + return Objects.equals(indexResponses, other.indexResponses); + } + + @Override + public String toString() { + 
return Strings.toString(this, true, true); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java new file mode 100644 index 0000000000000..eb564b7cd27b6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ForceMergeAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "forcemerge"; + private static final ParseField MAX_NUM_SEGMENTS_FIELD = new ParseField("max_num_segments"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + false, a -> { + int maxNumSegments = (int) a[0]; + return new ForceMergeAction(maxNumSegments); + }); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_NUM_SEGMENTS_FIELD); + } + + private final int maxNumSegments; + + public static ForceMergeAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ForceMergeAction(int maxNumSegments) { + if (maxNumSegments <= 0) { + throw new IllegalArgumentException("[" + MAX_NUM_SEGMENTS_FIELD.getPreferredName() + + "] must be a positive integer"); + } + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MAX_NUM_SEGMENTS_FIELD.getPreferredName(), maxNumSegments); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + 
ForceMergeAction other = (ForceMergeAction) obj; + return Objects.equals(maxNumSegments, other.maxNumSegments); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..af17a3ea48cf9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; + +public class GetLifecyclePolicyRequest extends TimedRequest { + + private final String[] policyNames; + + public GetLifecyclePolicyRequest(String... 
policyNames) { + if (policyNames == null) { + this.policyNames = Strings.EMPTY_ARRAY; + } else { + for (String name : policyNames) { + if (name == null) { + throw new IllegalArgumentException("cannot include null policy name"); + } + } + this.policyNames = policyNames; + } + } + + public String[] getPolicyNames() { + return policyNames; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetLifecyclePolicyRequest request = (GetLifecyclePolicyRequest) o; + return Arrays.equals(getPolicyNames(), request.getPolicyNames()); + } + + @Override + public int hashCode() { + return Arrays.hashCode(getPolicyNames()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..fc007cb5aebd4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+
+package org.elasticsearch.client.indexlifecycle;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+
+// NOTE(review): generic type parameters appear to have been stripped from this dump
+// (presumably ImmutableOpenMap<String, LifecyclePolicyMetadata> throughout) — confirm against upstream.
+/**
+ * Response of the Get Lifecycle Policy API: maps each policy name to its metadata.
+ */
+public class GetLifecyclePolicyResponse implements ToXContentObject {
+
+    private final ImmutableOpenMap policies;
+
+    public GetLifecyclePolicyResponse(ImmutableOpenMap policies) {
+        this.policies = policies;
+    }
+
+    public ImmutableOpenMap getPolicies() {
+        return policies;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+        // Serializes as one top-level field per policy: { "<policy name>": { ...metadata... }, ... }
+        builder.startObject();
+        for (ObjectObjectCursor stringLifecyclePolicyObjectObjectCursor : policies) {
+            builder.field(stringLifecyclePolicyObjectObjectCursor.key, stringLifecyclePolicyObjectObjectCursor.value);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public static GetLifecyclePolicyResponse fromXContent(XContentParser parser) throws IOException {
+        ImmutableOpenMap.Builder policies = ImmutableOpenMap.builder();
+
+        // Position on the first token if the caller has not advanced the parser yet.
+        if (parser.currentToken() == null) {
+            parser.nextToken();
+        }
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
+        parser.nextToken();
+
+        // Each top-level object field is one policy: the field name is the policy name,
+        // the value is its metadata. Non-object tokens are skipped by advancing the parser;
+        // LifecyclePolicyMetadata.parse is expected to consume the whole nested object.
+        while (!parser.isClosed()) {
+            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+                String policyName = parser.currentName();
+                // (typo in this local name kept as-is; it is not part of the API)
+                LifecyclePolicyMetadata policyDefinion = LifecyclePolicyMetadata.parse(parser, policyName);
+                policies.put(policyName, policyDefinion);
+            } else {
+                parser.nextToken();
+            }
+        }
+
+        return new GetLifecyclePolicyResponse(policies.build());
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        GetLifecyclePolicyResponse that = (GetLifecyclePolicyResponse) o;
+        return Objects.equals(getPolicies(), that.getPolicies());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(getPolicies());
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java
new file mode 100644
index 0000000000000..5e4a36739d28b
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.indexlifecycle;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.joda.time.DateTime;
+import org.joda.time.chrono.ISOChronology;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Explain-lifecycle status of a single index: whether it is managed by ILM
+ * and, if so, where it currently is in its policy (phase/action/step, the
+ * associated timestamps, and optional failure/step details).
+ *
+ * NOTE(review): generic type parameters appear stripped in this dump
+ * (PARSER is presumably ConstructingObjectParser<IndexLifecycleExplainResponse, Void>) — confirm upstream.
+ */
+public class IndexLifecycleExplainResponse implements ToXContentObject {
+
+    private static final ParseField INDEX_FIELD = new ParseField("index");
+    private static final ParseField MANAGED_BY_ILM_FIELD = new ParseField("managed");
+    private static final ParseField POLICY_NAME_FIELD = new ParseField("policy");
+    private static final ParseField LIFECYCLE_DATE_FIELD = new ParseField("lifecycle_date");
+    private static final ParseField PHASE_FIELD = new ParseField("phase");
+    private static final ParseField ACTION_FIELD = new ParseField("action");
+    private static final ParseField STEP_FIELD = new ParseField("step");
+    private static final ParseField FAILED_STEP_FIELD = new ParseField("failed_step");
+    private static final ParseField PHASE_TIME_FIELD = new ParseField("phase_time");
+    private static final ParseField ACTION_TIME_FIELD = new ParseField("action_time");
+    private static final ParseField STEP_TIME_FIELD = new ParseField("step_time");
+    private static final ParseField STEP_INFO_FIELD = new ParseField("step_info");
+    private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution");
+
+    // Missing numeric fields parse to the -1L sentinel used by the unmanaged-index representation.
+    public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+        "index_lifecycle_explain_response",
+        a -> new IndexLifecycleExplainResponse(
+            (String) a[0],
+            (boolean) a[1],
+            (String) a[2],
+            (long) (a[3] == null ? -1L: a[3]),
+            (String) a[4],
+            (String) a[5],
+            (String) a[6],
+            (String) a[7],
+            (long) (a[8] == null ? -1L: a[8]),
+            (long) (a[9] == null ? -1L: a[9]),
+            (long) (a[10] == null ? -1L: a[10]),
+            (BytesReference) a[11],
+            (PhaseExecutionInfo) a[12]));
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD);
+        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), POLICY_NAME_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LIFECYCLE_DATE_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PHASE_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), STEP_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), PHASE_TIME_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ACTION_TIME_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), STEP_TIME_FIELD);
+        // step_info is captured as raw JSON bytes rather than parsed into a structure.
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
+            XContentBuilder builder = JsonXContent.contentBuilder();
+            builder.copyCurrentStructure(p);
+            return BytesArray.bytes(builder);
+        }, STEP_INFO_FIELD);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""),
+            PHASE_EXECUTION_INFO);
+    }
+
+    private final String index;
+    private final String policyName;
+    private final String phase;
+    private final String action;
+    private final String step;
+    private final String failedStep;
+    // Epoch-millis timestamps; -1L means "not set" (unmanaged index or missing field).
+    private final long lifecycleDate;
+    private final long phaseTime;
+    private final long actionTime;
+    private final long stepTime;
+    private final boolean managedByILM;
+    private final BytesReference stepInfo;
+    private final PhaseExecutionInfo phaseExecutionInfo;
+
+    /** Builds a response for an index managed by ILM; {@code policyName} must be non-null. */
+    public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, long lifecycleDate,
+                                                                        String phase, String action, String step, String failedStep,
+                                                                        long phaseTime, long actionTime, long stepTime,
+                                                                        BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) {
+        return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime,
+            actionTime, stepTime, stepInfo, phaseExecutionInfo);
+    }
+
+    /** Builds a response for an index not managed by ILM (all detail fields null / -1L). */
+    public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) {
+        return new IndexLifecycleExplainResponse(index, false, null, -1L, null, null, null, null, -1L, -1L, -1L, null, null);
+    }
+
+    private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, long lifecycleDate,
+                                          String phase, String action, String step, String failedStep, long phaseTime, long actionTime,
+                                          long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) {
+        // Invariant: managed indices must name a policy; unmanaged indices must carry no lifecycle details at all.
+        if (managedByILM) {
+            if (policyName == null) {
+                throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index");
+            }
+        } else {
+            if (policyName != null || lifecycleDate >= 0 || phase != null || action != null || step != null || failedStep != null
+                || phaseTime >= 0 || actionTime >= 0 || stepTime >= 0 || stepInfo != null || phaseExecutionInfo != null) {
+                throw new IllegalArgumentException(
+                    "Unmanaged index response must only contain fields: [" + MANAGED_BY_ILM_FIELD + ", " + INDEX_FIELD + "]");
+            }
+        }
+        this.index = index;
+        this.policyName = policyName;
+        this.managedByILM = managedByILM;
+        this.lifecycleDate = lifecycleDate;
+        this.phase = phase;
+        this.action = action;
+        this.step = step;
+        this.phaseTime = phaseTime;
+        this.actionTime = actionTime;
+        this.stepTime = stepTime;
+        this.failedStep = failedStep;
+        this.stepInfo = stepInfo;
+        this.phaseExecutionInfo = phaseExecutionInfo;
+    }
+
+    public String getIndex() {
+        return index;
+    }
+
+    public boolean managedByILM() {
+        return managedByILM;
+    }
+
+    public String getPolicyName() {
+        return policyName;
+    }
+
+    public long getLifecycleDate() {
+        return lifecycleDate;
+    }
+
+    public String getPhase() {
+        return phase;
+    }
+
+    public long getPhaseTime() {
+        return phaseTime;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public long getActionTime() {
+        return actionTime;
+    }
+
+    public String getStep() {
+        return step;
+    }
+
+    public long getStepTime() {
+        return stepTime;
+    }
+
+    public String getFailedStep() {
+        return failedStep;
+    }
+
+    public BytesReference getStepInfo() {
+        return stepInfo;
+    }
+
+    public PhaseExecutionInfo getPhaseExecutionInfo() {
+        return phaseExecutionInfo;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(INDEX_FIELD.getPreferredName(), index);
+        builder.field(MANAGED_BY_ILM_FIELD.getPreferredName(), managedByILM);
+        if (managedByILM) {
+            builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName);
+            // Timestamps render as ISO dates in human-readable mode and epoch millis otherwise.
+            if (builder.humanReadable()) {
+                builder.field(LIFECYCLE_DATE_FIELD.getPreferredName(), new DateTime(lifecycleDate, ISOChronology.getInstanceUTC()));
+            } else {
+                builder.field(LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate);
+            }
+            builder.field(PHASE_FIELD.getPreferredName(), phase);
+            if (builder.humanReadable()) {
+                builder.field(PHASE_TIME_FIELD.getPreferredName(), new DateTime(phaseTime, ISOChronology.getInstanceUTC()));
+            } else {
+                builder.field(PHASE_TIME_FIELD.getPreferredName(), phaseTime);
+            }
+            builder.field(ACTION_FIELD.getPreferredName(), action);
+            if (builder.humanReadable()) {
+                builder.field(ACTION_TIME_FIELD.getPreferredName(), new DateTime(actionTime, ISOChronology.getInstanceUTC()));
+            } else {
+                builder.field(ACTION_TIME_FIELD.getPreferredName(), actionTime);
+            }
+            builder.field(STEP_FIELD.getPreferredName(), step);
+            if (builder.humanReadable()) {
+                builder.field(STEP_TIME_FIELD.getPreferredName(), new DateTime(stepTime, ISOChronology.getInstanceUTC()));
+            } else {
+                builder.field(STEP_TIME_FIELD.getPreferredName(), stepTime);
+            }
+            if (Strings.hasLength(failedStep)) {
+                builder.field(FAILED_STEP_FIELD.getPreferredName(), failedStep);
+            }
+            if (stepInfo != null && stepInfo.length() > 0) {
+                // Emit the step info exactly as captured, without re-parsing it.
+                builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON);
+            }
+            if (phaseExecutionInfo != null) {
+                builder.field(PHASE_EXECUTION_INFO.getPreferredName(), phaseExecutionInfo);
+            }
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, actionTime,
+            stepTime, stepInfo, phaseExecutionInfo);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (obj.getClass() != getClass()) {
+            return false;
+        }
+        IndexLifecycleExplainResponse other = (IndexLifecycleExplainResponse) obj;
+        return Objects.equals(index, other.index) &&
+            Objects.equals(managedByILM, other.managedByILM) &&
+            Objects.equals(policyName, other.policyName) &&
+            Objects.equals(lifecycleDate, other.lifecycleDate) &&
+            Objects.equals(phase, other.phase) &&
+            Objects.equals(action, other.action) &&
+            Objects.equals(step, other.step) &&
+            Objects.equals(failedStep, other.failedStep) &&
+            Objects.equals(phaseTime, other.phaseTime) &&
+            Objects.equals(actionTime, other.actionTime) &&
+            Objects.equals(stepTime, other.stepTime) &&
+            Objects.equals(stepInfo, other.stepInfo) &&
+            Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this, true, true);
+    }
+
+}
+
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java new file mode 100644 index 0000000000000..22935f197731c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.plugins.spi.NamedXContentProvider;

import java.util.Arrays;
import java.util.List;

/**
 * SPI provider that registers the named-XContent parsers for every
 * {@link LifecycleAction} implementation supported by the high-level REST
 * client, so ILM phase definitions can be parsed from responses.
 */
public class IndexLifecycleNamedXContentProvider implements NamedXContentProvider {

    @Override
    public List<NamedXContentRegistry.Entry> getNamedXContentParsers() {
        // One registry entry per concrete ILM action, keyed by the action's NAME constant.
        return Arrays.asList(
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(AllocateAction.NAME),
                AllocateAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(DeleteAction.NAME),
                DeleteAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(ForceMergeAction.NAME),
                ForceMergeAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(ReadOnlyAction.NAME),
                ReadOnlyAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(RolloverAction.NAME),
                RolloverAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class,
                new ParseField(ShrinkAction.NAME),
                ShrinkAction::parse)
        );
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.indexlifecycle;

/**
 * Interface for index lifecycle management actions. Each concrete action
 * (allocate, delete, forcemerge, readonly, rollover, shrink, ...) identifies
 * itself by a stable name, which is used as the key under the "actions"
 * object of a phase definition.
 */
public interface LifecycleAction {

    /**
     * @return the name of this action
     */
    String getName();
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.client.TimedRequest;

/**
 * A {@link TimedRequest} to get the current status of index lifecycle management.
 * The request carries no body or parameters of its own; timeout handling is
 * inherited from {@link TimedRequest}.
 */
public class LifecycleManagementStatusRequest extends TimedRequest {
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.util.Objects;

/**
 * The current status of index lifecycle management. See {@link OperationMode} for available statuses.
 */
public class LifecycleManagementStatusResponse {

    private static final String OPERATION_MODE = "operation_mode";

    // Typed parser (the raw type previously here required an unchecked suppression).
    private static final ConstructingObjectParser<LifecycleManagementStatusResponse, Void> PARSER =
        new ConstructingObjectParser<>(OPERATION_MODE, a -> new LifecycleManagementStatusResponse((String) a[0]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(OPERATION_MODE));
    }

    private final OperationMode operationMode;

    // package private for testing
    LifecycleManagementStatusResponse(String operationMode) {
        // Throws IllegalArgumentException for unknown mode names (see OperationMode.fromString).
        this.operationMode = OperationMode.fromString(operationMode);
    }

    /**
     * @return the mode index lifecycle management is currently operating in
     */
    public OperationMode getOperationMode() {
        return operationMode;
    }

    public static LifecycleManagementStatusResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        LifecycleManagementStatusResponse that = (LifecycleManagementStatusResponse) o;
        return operationMode == that.operationMode;
    }

    @Override
    public int hashCode() {
        return Objects.hash(operationMode);
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Represents the lifecycle of an index from creation to deletion. A
 * {@link LifecyclePolicy} is made up of a set of {@link Phase}s which it will
 * move through.
 */
public class LifecyclePolicy implements ToXContentObject {
    static final ParseField PHASES_FIELD = new ParseField("phases");

    @SuppressWarnings("unchecked")
    public static ConstructingObjectParser<LifecyclePolicy, String> PARSER =
        new ConstructingObjectParser<>("lifecycle_policy", false, (a, name) -> {
            List<Phase> phases = (List<Phase>) a[0];
            Map<String, Phase> phaseMap = phases.stream().collect(Collectors.toMap(Phase::getName, Function.identity()));
            return new LifecyclePolicy(name, phaseMap);
        });

    /** Maps each known phase name to the set of action names permitted in that phase. */
    private static Map<String, Set<String>> ALLOWED_ACTIONS = new HashMap<>();

    static {
        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Phase.parse(p, n), v -> {
            throw new IllegalArgumentException("ordered " + PHASES_FIELD.getPreferredName() + " are not supported");
        }, PHASES_FIELD);

        ALLOWED_ACTIONS.put("hot", Sets.newHashSet(RolloverAction.NAME));
        ALLOWED_ACTIONS.put("warm", Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME, ReadOnlyAction.NAME, ShrinkAction.NAME));
        ALLOWED_ACTIONS.put("cold", Sets.newHashSet(AllocateAction.NAME));
        ALLOWED_ACTIONS.put("delete", Sets.newHashSet(DeleteAction.NAME));
    }

    private final String name;
    private final Map<String, Phase> phases;

    /**
     * @param name
     *            the name of this {@link LifecyclePolicy}
     * @param phases
     *            a {@link Map} of {@link Phase}s which make up this
     *            {@link LifecyclePolicy}, keyed by phase name
     * @throws IllegalArgumentException if a phase name is unknown or a phase
     *            contains an action that is not allowed in it
     */
    public LifecyclePolicy(String name, Map<String, Phase> phases) {
        // Validate the phase/action combination up front so an invalid policy
        // fails fast on the client rather than on the server.
        phases.values().forEach(phase -> {
            if (ALLOWED_ACTIONS.containsKey(phase.getName()) == false) {
                throw new IllegalArgumentException("Lifecycle does not support phase [" + phase.getName() + "]");
            }
            phase.getActions().forEach((actionName, action) -> {
                if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) {
                    throw new IllegalArgumentException("invalid action [" + actionName + "] " +
                        "defined in phase [" + phase.getName() + "]");
                }
            });
        });
        this.name = name;
        this.phases = phases;
    }

    public static LifecyclePolicy parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    /**
     * @return the name of this {@link LifecyclePolicy}
     */
    public String getName() {
        return name;
    }

    /**
     * @return the {@link Phase}s for this {@link LifecyclePolicy} in the order
     *         in which they will be executed.
     */
    public Map<String, Phase> getPhases() {
        return phases;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.startObject(PHASES_FIELD.getPreferredName());
        for (Phase phase : phases.values()) {
            builder.field(phase.getName(), phase);
        }
        builder.endObject();
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, phases);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        LifecyclePolicy other = (LifecyclePolicy) obj;
        return Objects.equals(name, other.name) &&
            Objects.equals(phases, other.phases);
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.Objects;

/**
 * A {@link LifecyclePolicy} together with the cluster-side bookkeeping returned
 * by the get-lifecycle-policy API: the stored policy version and the time it was
 * last modified.
 */
public class LifecyclePolicyMetadata implements ToXContentObject {

    static final ParseField POLICY = new ParseField("policy");
    static final ParseField VERSION = new ParseField("version");
    static final ParseField MODIFIED_DATE = new ParseField("modified_date");

    public static final ConstructingObjectParser<LifecyclePolicyMetadata, String> PARSER =
        new ConstructingObjectParser<>("policy_metadata", a -> {
            LifecyclePolicy policy = (LifecyclePolicy) a[0];
            // modified_date arrives as an ISO-8601 string; store it as epoch millis
            return new LifecyclePolicyMetadata(policy, (long) a[1], ZonedDateTime.parse((String) a[2]).toInstant().toEpochMilli());
        });

    static {
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY);
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), MODIFIED_DATE);
    }

    public static LifecyclePolicyMetadata parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    private final LifecyclePolicy policy;
    private final long version;
    private final long modifiedDate;

    /**
     * @param policy       the stored policy definition
     * @param version      the version of the stored policy
     * @param modifiedDate last modification time, epoch milliseconds UTC
     */
    public LifecyclePolicyMetadata(LifecyclePolicy policy, long version, long modifiedDate) {
        this.policy = policy;
        this.version = version;
        this.modifiedDate = modifiedDate;
    }

    public LifecyclePolicy getPolicy() {
        return policy;
    }

    public String getName() {
        return policy.getName();
    }

    public long getVersion() {
        return version;
    }

    /** @return last modification time in epoch milliseconds */
    public long getModifiedDate() {
        return modifiedDate;
    }

    /** @return the modification time rendered as an ISO-8601 string in UTC */
    public String getModifiedDateString() {
        ZonedDateTime modifiedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC);
        return modifiedDateTime.toString();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(POLICY.getPreferredName(), policy);
        builder.field(VERSION.getPreferredName(), version);
        // Serialized in the same ISO-8601 UTC form the parser expects.
        builder.field(MODIFIED_DATE.getPreferredName(),
            ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC).toString());
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(policy, version, modifiedDate);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        LifecyclePolicyMetadata other = (LifecyclePolicyMetadata) obj;
        return Objects.equals(policy, other.policy) &&
            Objects.equals(version, other.version) &&
            Objects.equals(modifiedDate, other.modifiedDate);
    }

}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.action.admin.indices.shrink.ShrinkAction;

import java.util.EnumSet;
import java.util.Locale;

/**
 * The modes the Index Lifecycle Service can operate in, together with the
 * transitions that are legal between them.
 */
public enum OperationMode {
    /**
     * No policies are being executed; the only permitted transition is back to RUNNING.
     */
    STOPPED {
        @Override
        public boolean isValidChange(OperationMode nextMode) {
            return nextMode == RUNNING;
        }
    },

    /**
     * Winding down: only sensitive actions (like {@link ShrinkAction}) keep running
     * until they finish, at which point the service moves to STOPPED.
     */
    STOPPING {
        @Override
        public boolean isValidChange(OperationMode nextMode) {
            return nextMode == RUNNING || nextMode == STOPPED;
        }
    },

    /**
     * Normal operation: every policy executes as usual.
     */
    RUNNING {
        @Override
        public boolean isValidChange(OperationMode nextMode) {
            return nextMode == STOPPING;
        }
    };

    /**
     * @param nextMode the mode being switched to
     * @return whether moving from this mode to {@code nextMode} is a legal transition
     */
    public abstract boolean isValidChange(OperationMode nextMode);

    /**
     * Case-insensitive lookup of a mode by name.
     *
     * @throws IllegalArgumentException if {@code string} matches no mode
     */
    static OperationMode fromString(String string) {
        for (OperationMode mode : EnumSet.allOf(OperationMode.class)) {
            if (string.equalsIgnoreCase(mode.name())) {
                return mode;
            }
        }
        throw new IllegalArgumentException(String.format(Locale.ROOT, "%s is not a valid operation_mode", string));
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Represents set of {@link LifecycleAction}s which should be executed at a
 * particular point in the lifecycle of an index.
 */
public class Phase implements ToXContentObject {

    static final ParseField MIN_AGE = new ParseField("min_age");
    static final ParseField ACTIONS_FIELD = new ParseField("actions");

    @SuppressWarnings("unchecked")
    private static final ConstructingObjectParser<Phase, String> PARSER = new ConstructingObjectParser<>("phase", false,
        (a, name) -> new Phase(name, (TimeValue) a[0], ((List<LifecycleAction>) a[1]).stream()
            .collect(Collectors.toMap(LifecycleAction::getName, Function.identity()))));
    static {
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> TimeValue.parseTimeValue(p.text(), MIN_AGE.getPreferredName()), MIN_AGE, ValueType.VALUE);
        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(),
            (p, c, n) -> p.namedObject(LifecycleAction.class, n, null), v -> {
                throw new IllegalArgumentException("ordered " + ACTIONS_FIELD.getPreferredName() + " are not supported");
            }, ACTIONS_FIELD);
    }

    public static Phase parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    private final String name;
    private final Map<String, LifecycleAction> actions;
    private final TimeValue minimumAge;

    /**
     * @param name
     *            the name of this {@link Phase}.
     * @param minimumAge
     *            the age of the index when the index should move to this
     *            {@link Phase}; {@code null} is treated as {@link TimeValue#ZERO}.
     * @param actions
     *            a {@link Map} of the {@link LifecycleAction}s to run when
     *            during this {@link Phase}. The keys in this map are the associated
     *            action names.
     */
    public Phase(String name, TimeValue minimumAge, Map<String, LifecycleAction> actions) {
        this.name = name;
        if (minimumAge == null) {
            this.minimumAge = TimeValue.ZERO;
        } else {
            this.minimumAge = minimumAge;
        }
        this.actions = actions;
    }

    /**
     * @return the age of the index when the index should move to this
     *         {@link Phase}.
     */
    public TimeValue getMinimumAge() {
        return minimumAge;
    }

    /**
     * @return the name of this {@link Phase}
     */
    public String getName() {
        return name;
    }

    /**
     * @return a {@link Map} of the {@link LifecycleAction}s to run when during
     *         this {@link Phase}, keyed by action name.
     */
    public Map<String, LifecycleAction> getActions() {
        return actions;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(MIN_AGE.getPreferredName(), minimumAge.getStringRep());
        builder.field(ACTIONS_FIELD.getPreferredName(), actions);
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, minimumAge, actions);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        Phase other = (Phase) obj;
        return Objects.equals(name, other.name) &&
            Objects.equals(minimumAge, other.minimumAge) &&
            Objects.equals(actions, other.actions);
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * This class contains information about the current phase being executed by Index
 * Lifecycle Management on the specific index.
 */
public class PhaseExecutionInfo implements ToXContentObject {
    private static final ParseField POLICY_NAME_FIELD = new ParseField("policy");
    private static final ParseField PHASE_DEFINITION_FIELD = new ParseField("phase_definition");
    private static final ParseField VERSION_FIELD = new ParseField("version");
    private static final ParseField MODIFIED_DATE_IN_MILLIS_FIELD = new ParseField("modified_date_in_millis");

    private static final ConstructingObjectParser<PhaseExecutionInfo, String> PARSER = new ConstructingObjectParser<>(
        "phase_execution_info", false,
        (a, name) -> new PhaseExecutionInfo((String) a[0], (Phase) a[1], (long) a[2], (long) a[3]));
    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_NAME_FIELD);
        // phase_definition is optional: it is absent when no phase is currently defined
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Phase::parse, PHASE_DEFINITION_FIELD);
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD);
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_IN_MILLIS_FIELD);
    }

    public static PhaseExecutionInfo parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    private final String policyName;
    private final Phase phase;
    private final long version;
    private final long modifiedDate;

    /**
     * This class holds information about the current phase that is being executed
     *
     * @param policyName the name of the policy being executed, this may not be the current policy assigned to an index
     * @param phase the current phase definition executed, may be {@code null}
     * @param version the version of the policyName being executed
     * @param modifiedDate the time the executing version of the phase was modified, epoch milliseconds
     */
    public PhaseExecutionInfo(String policyName, Phase phase, long version, long modifiedDate) {
        this.policyName = policyName;
        this.phase = phase;
        this.version = version;
        this.modifiedDate = modifiedDate;
    }

    public String getPolicyName() {
        return policyName;
    }

    public Phase getPhase() {
        return phase;
    }

    public long getVersion() {
        return version;
    }

    public long getModifiedDate() {
        return modifiedDate;
    }

    @Override
    public int hashCode() {
        return Objects.hash(policyName, phase, version, modifiedDate);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        PhaseExecutionInfo other = (PhaseExecutionInfo) obj;
        return Objects.equals(policyName, other.policyName) &&
            Objects.equals(phase, other.phase) &&
            Objects.equals(version, other.version) &&
            Objects.equals(modifiedDate, other.modifiedDate);
    }

    @Override
    public String toString() {
        return Strings.toString(this, false, true);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName);
        if (phase != null) {
            builder.field(PHASE_DEFINITION_FIELD.getPreferredName(), phase);
        }
        builder.field(VERSION_FIELD.getPreferredName(), version);
        builder.timeField(MODIFIED_DATE_IN_MILLIS_FIELD.getPreferredName(), "modified_date", modifiedDate);
        builder.endObject();
        return builder;
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.client.TimedRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

/**
 * A {@link TimedRequest} that creates or updates a single index lifecycle policy.
 */
public class PutLifecyclePolicyRequest extends TimedRequest implements ToXContentObject {

    private final LifecyclePolicy policy;

    /**
     * @param policy the policy definition to store; must be non-null and carry a non-empty name
     * @throws IllegalArgumentException if the policy or its name is missing
     */
    public PutLifecyclePolicyRequest(LifecyclePolicy policy) {
        if (policy == null) {
            throw new IllegalArgumentException("policy definition cannot be null");
        }
        if (Strings.isNullOrEmpty(policy.getName())) {
            throw new IllegalArgumentException("policy name must be present");
        }
        this.policy = policy;
    }

    /** @return the name of the policy being stored */
    public String getName() {
        return policy.getName();
    }

    /** @return the policy definition carried by this request */
    public LifecyclePolicy getLifecyclePolicy() {
        return policy;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("policy", policy);
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        PutLifecyclePolicyRequest other = (PutLifecyclePolicyRequest) obj;
        return Objects.equals(getLifecyclePolicy(), other.getLifecyclePolicy());
    }

    @Override
    public int hashCode() {
        return Objects.hash(getLifecyclePolicy());
    }
}
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ReadOnlyAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "readonly"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new); + + public static ReadOnlyAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ReadOnlyAction() { + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return ReadOnlyAction.class.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..88bdf4dd6868d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyRequest extends TimedRequest { + + private final List indices; + private final IndicesOptions indicesOptions; + + public RemoveIndexLifecyclePolicyRequest(List indices) { + this(indices, IndicesOptions.strictExpandOpen()); + } + + public RemoveIndexLifecyclePolicyRequest(List indices, IndicesOptions indicesOptions) { + this.indices = Collections.unmodifiableList(Objects.requireNonNull(indices)); + this.indicesOptions = Objects.requireNonNull(indicesOptions); + } + + public List indices() { + return indices; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public int hashCode() { + return Objects.hash(indices, indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyRequest other = (RemoveIndexLifecyclePolicyRequest) obj; + return Objects.deepEquals(indices, other.indices) && + Objects.equals(indicesOptions, other.indicesOptions); + } +} diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..3aae1537faa29 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyResponse { + + public static final ParseField HAS_FAILURES_FIELD = new ParseField("has_failures"); + public static final ParseField FAILED_INDEXES_FIELD = new ParseField("failed_indexes"); + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "change_policy_for_index_response", true, args -> new RemoveIndexLifecyclePolicyResponse((List)args[0])); + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FAILED_INDEXES_FIELD); + // Needs to be declared but not used in constructing the response object + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), HAS_FAILURES_FIELD); + } + + private final List failedIndexes; + + public RemoveIndexLifecyclePolicyResponse(List failedIndexes) { + if (failedIndexes == null) { + throw new IllegalArgumentException(FAILED_INDEXES_FIELD.getPreferredName() + " cannot be null"); + } + this.failedIndexes = Collections.unmodifiableList(failedIndexes); + } + + public List getFailedIndexes() { + return failedIndexes; + } + + public boolean hasFailures() { + return failedIndexes.isEmpty() == false; + } + + public static RemoveIndexLifecyclePolicyResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(failedIndexes); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyResponse other = (RemoveIndexLifecyclePolicyResponse) obj; + return Objects.equals(failedIndexes, 
other.failedIndexes); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..6f3acaf19aaea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import org.elasticsearch.client.TimedRequest; + +public class RetryLifecyclePolicyRequest extends TimedRequest { + + private final List indices; + + public RetryLifecyclePolicyRequest(String... 
indices) { + if (indices.length == 0) { + throw new IllegalArgumentException("Must at least specify one index to retry"); + } + this.indices = Arrays.asList(indices); + } + + public List getIndices() { + return indices; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RetryLifecyclePolicyRequest that = (RetryLifecyclePolicyRequest) o; + return indices.size() == that.indices.size() && indices.containsAll(that.indices); + } + + @Override + public int hashCode() { + return Objects.hash(indices); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java new file mode 100644 index 0000000000000..0cc9dcf234969 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + + +public class RolloverAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "rollover"; + private static final ParseField MAX_SIZE_FIELD = new ParseField("max_size"); + private static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs"); + private static final ParseField MAX_AGE_FIELD = new ParseField("max_age"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new RolloverAction((ByteSizeValue) a[0], (TimeValue) a[1], (Long) a[2])); + static { + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SIZE_FIELD.getPreferredName()), MAX_SIZE_FIELD, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_AGE_FIELD.getPreferredName()), MAX_AGE_FIELD, ValueType.VALUE); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD); + } + + private final ByteSizeValue maxSize; + private final Long maxDocs; + private final TimeValue maxAge; + + public static RolloverAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public RolloverAction(ByteSizeValue maxSize, TimeValue maxAge, Long maxDocs) { + if (maxSize == null && maxAge == null && maxDocs == null) 
{ + throw new IllegalArgumentException("At least one rollover condition must be set."); + } + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + public ByteSizeValue getMaxSize() { + return maxSize; + } + + public TimeValue getMaxAge() { + return maxAge; + } + + public Long getMaxDocs() { + return maxDocs; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (maxSize != null) { + builder.field(MAX_SIZE_FIELD.getPreferredName(), maxSize.getStringRep()); + } + if (maxAge != null) { + builder.field(MAX_AGE_FIELD.getPreferredName(), maxAge.getStringRep()); + } + if (maxDocs != null) { + builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverAction other = (RolloverAction) obj; + return Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java new file mode 100644 index 0000000000000..345356380145e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ShrinkAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "shrink"; + private static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0])); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD); + } + + private int numberOfShards; + + public static ShrinkAction parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public ShrinkAction(int numberOfShards) { + if (numberOfShards <= 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0"); + } + this.numberOfShards = numberOfShards; + } + + int getNumberOfShards() { + return numberOfShards; + } + + @Override + public String getName() { + 
return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShrinkAction that = (ShrinkAction) o; + return Objects.equals(numberOfShards, that.numberOfShards); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfShards); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java new file mode 100644 index 0000000000000..84cc844a92a98 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StartILMRequest extends TimedRequest { + + public StartILMRequest() { + } + + @Override + public int hashCode() { + return 64; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java new file mode 100644 index 0000000000000..1695fc0dd7aea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StopILMRequest extends TimedRequest { + + public StopILMRequest() { + } + + @Override + public int hashCode() { + return 75; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 0000000000000..4204a868246a5 --- /dev/null +++ b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.client.indexlifecycle.IndexLifecycleNamedXContentProvider \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java new file mode 100644 index 0000000000000..f2040bc88da34 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -0,0 +1,286 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.indexlifecycle.AllocateAction; +import org.elasticsearch.client.indexlifecycle.DeleteAction; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.indexlifecycle.ForceMergeAction; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.client.indexlifecycle.OperationMode; +import org.elasticsearch.client.indexlifecycle.Phase; +import org.elasticsearch.client.indexlifecycle.PhaseExecutionInfo; +import 
org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.is; + +public class IndexLifecycleIT extends ESRestHighLevelClientTestCase { + + public void testRemoveIndexLifecyclePolicy() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("rbh", Settings.builder().put("index.lifecycle.name", policyName).build()); + + 
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + GetSettingsResponse settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertThat(settingsResponse.getSetting("foo", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("rbh", "index.lifecycle.name"), equalTo(policyName)); + + List indices = new ArrayList<>(); + indices.add("foo"); + indices.add("rbh"); + RemoveIndexLifecyclePolicyRequest removeReq = new RemoveIndexLifecyclePolicyRequest(indices); + RemoveIndexLifecyclePolicyResponse removeResp = execute(removeReq, highLevelClient().indexLifecycle()::removeIndexLifecyclePolicy, + highLevelClient().indexLifecycle()::removeIndexLifecyclePolicyAsync); + assertThat(removeResp.hasFailures(), is(false)); + assertThat(removeResp.getFailedIndexes().isEmpty(), is(true)); + + getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertNull(settingsResponse.getSetting("foo", "index.lifecycle.name")); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertNull(settingsResponse.getSetting("rbh", "index.lifecycle.name")); + } + + public void testStartStopILM() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", "bar").build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", 
"eggplant").build()); + createIndex("squash", Settings.EMPTY); + + LifecycleManagementStatusRequest statusRequest = new LifecycleManagementStatusRequest(); + LifecycleManagementStatusResponse statusResponse = execute( + statusRequest, + highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + + StopILMRequest stopReq = new StopILMRequest(); + AcknowledgedResponse stopResponse = execute(stopReq, highLevelClient().indexLifecycle()::stopILM, + highLevelClient().indexLifecycle()::stopILMAsync); + assertTrue(stopResponse.isAcknowledged()); + + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertThat(statusResponse.getOperationMode(), + Matchers.anyOf(equalTo(OperationMode.STOPPING), + equalTo(OperationMode.STOPPED))); + + StartILMRequest startReq = new StartILMRequest(); + AcknowledgedResponse startResponse = execute(startReq, highLevelClient().indexLifecycle()::startILM, + highLevelClient().indexLifecycle()::startILMAsync); + assertTrue(startResponse.isAcknowledged()); + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + } + + public void testExplainLifecycle() throws Exception { + Map lifecyclePhases = new HashMap<>(); + Map hotActions = Collections.singletonMap( + RolloverAction.NAME, + new RolloverAction(null, TimeValue.timeValueHours(50 * 24), null)); + Phase hotPhase = new Phase("hot", randomFrom(TimeValue.ZERO, null), hotActions); + lifecyclePhases.put("hot", hotPhase); + + Map warmActions = new HashMap<>(); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, null, 
Collections.singletonMap("_name", "node-1"))); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1000)); + lifecyclePhases.put("warm", new Phase("warm", TimeValue.timeValueSeconds(1000), warmActions)); + + Map coldActions = new HashMap<>(); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null)); + lifecyclePhases.put("cold", new Phase("cold", TimeValue.timeValueSeconds(2000), coldActions)); + + Map deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + lifecyclePhases.put("delete", new Phase("delete", TimeValue.timeValueSeconds(3000), deleteActions)); + + LifecyclePolicy policy = new LifecyclePolicy(randomAlphaOfLength(10), lifecyclePhases); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + AcknowledgedResponse putResponse = execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync); + assertTrue(putResponse.isAcknowledged()); + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policy.getName()); + GetLifecyclePolicyResponse getResponse = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + long expectedPolicyModifiedDate = getResponse.getPolicies().get(policy.getName()).getModifiedDate(); + + + createIndex("foo-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "foo-alias").build(), "", "\"foo-alias\" : {}"); + + createIndex("baz-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "baz-alias").build(), "", "\"baz-alias\" : {}"); + + createIndex("squash", Settings.EMPTY); + + ExplainLifecycleRequest req = new ExplainLifecycleRequest(); + req.indices("foo-01", "baz-01", "squash"); + ExplainLifecycleResponse 
response = execute(req, highLevelClient().indexLifecycle()::explainLifecycle, + highLevelClient().indexLifecycle()::explainLifecycleAsync); + Map indexResponses = response.getIndexResponses(); + assertEquals(3, indexResponses.size()); + IndexLifecycleExplainResponse fooResponse = indexResponses.get("foo-01"); + assertNotNull(fooResponse); + assertTrue(fooResponse.managedByILM()); + assertEquals("foo-01", fooResponse.getIndex()); + assertEquals("hot", fooResponse.getPhase()); + assertEquals("rollover", fooResponse.getAction()); + assertEquals("attempt_rollover", fooResponse.getStep()); + assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), + 1L, expectedPolicyModifiedDate), fooResponse.getPhaseExecutionInfo()); + IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); + assertNotNull(bazResponse); + assertTrue(bazResponse.managedByILM()); + assertEquals("baz-01", bazResponse.getIndex()); + assertEquals("hot", bazResponse.getPhase()); + assertEquals("rollover", bazResponse.getAction()); + assertEquals("attempt_rollover", bazResponse.getStep()); + IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); + assertNotNull(squashResponse); + assertFalse(squashResponse.managedByILM()); + assertEquals("squash", squashResponse.getIndex()); + } + + public void testDeleteLifecycle() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + DeleteLifecyclePolicyRequest deleteRequest = new DeleteLifecyclePolicyRequest(policy.getName()); + assertAcked(execute(deleteRequest, highLevelClient().indexLifecycle()::deleteLifecyclePolicy, + 
highLevelClient().indexLifecycle()::deleteLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policyName); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync)); + assertEquals(404, ex.status().getStatus()); + } + + public void testPutLifecycle() throws IOException { + String name = randomAlphaOfLengthBetween(5, 20); + LifecyclePolicy policy = createRandomPolicy(name); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(name); + GetLifecyclePolicyResponse response = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + assertEquals(policy, response.getPolicies().get(name).getPolicy()); + } + + public void testGetMultipleLifecyclePolicies() throws IOException { + int numPolicies = randomIntBetween(1, 10); + String[] policyNames = new String[numPolicies]; + LifecyclePolicy[] policies = new LifecyclePolicy[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(5, 10); + policies[i] = createRandomPolicy(policyNames[i]); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policies[i]); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + } + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(randomFrom(policyNames, null)); + GetLifecyclePolicyResponse response = execute(getRequest, 
highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + List retrievedPolicies = Arrays.stream(response.getPolicies().values().toArray()) + .map(p -> ((LifecyclePolicyMetadata) p).getPolicy()).collect(Collectors.toList()); + assertThat(retrievedPolicies, hasItems(policies)); + } + + public void testRetryLifecycleStep() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + createIndex("retry", Settings.builder().put("index.lifecycle.name", policy.getName()).build()); + RetryLifecyclePolicyRequest retryRequest = new RetryLifecyclePolicyRequest("retry"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute( + retryRequest, highLevelClient().indexLifecycle()::retryLifecycleStep, + highLevelClient().indexLifecycle()::retryLifecycleStepAsync + ) + ); + assertEquals(400, ex.status().getStatus()); + assertEquals( + "Elasticsearch exception [type=illegal_argument_exception, reason=cannot retry an action for an index [retry]" + + " that has not encountered an error when running a Lifecycle Policy]", + ex.getRootCause().getMessage() + ); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java new file mode 100644 index 0000000000000..0030fd0773a78 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.client.RequestConvertersTests.randomIndicesNames; +import 
static org.elasticsearch.client.RequestConvertersTests.setRandomIndicesOptions; +import static org.elasticsearch.client.RequestConvertersTests.setRandomMasterTimeout; +import static org.elasticsearch.client.RequestConvertersTests.setRandomTimeoutTimeValue; +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; +import static org.hamcrest.CoreMatchers.equalTo; + +public class IndexLifecycleRequestConvertersTests extends ESTestCase { + + public void testGetLifecyclePolicy() { + String[] policies = rarely() ? null : randomIndicesNames(0, 10); + GetLifecyclePolicyRequest req = new GetLifecyclePolicyRequest(policies); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.getLifecyclePolicy(req); + assertEquals(request.getMethod(), HttpGet.METHOD_NAME); + String policiesStr = Strings.arrayToCommaDelimitedString(policies); + assertEquals(request.getEndpoint(), "/_ilm/policy" + (policiesStr.isEmpty() ? 
"" : ("/" + policiesStr))); + assertEquals(request.getParameters(), expectedParams); + } + + public void testPutLifecyclePolicy() throws Exception { + String name = randomAlphaOfLengthBetween(2, 20); + LifecyclePolicy policy = createRandomPolicy(name); + PutLifecyclePolicyRequest req = new PutLifecyclePolicyRequest(policy); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.putLifecyclePolicy(req); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_ilm/policy/" + name, request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + } + + public void testDeleteLifecycle() { + String lifecycleName = randomAlphaOfLengthBetween(2,20); + DeleteLifecyclePolicyRequest req = new DeleteLifecyclePolicyRequest(lifecycleName); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.deleteLifecyclePolicy(req); + assertEquals(request.getMethod(), HttpDelete.METHOD_NAME); + assertEquals(request.getEndpoint(), "/_ilm/policy/" + lifecycleName); + assertEquals(request.getParameters(), expectedParams); + } + + public void testRemoveIndexLifecyclePolicy() { + Map expectedParams = new HashMap<>(); + String[] indices = randomIndicesNames(0, 10); + IndicesOptions indicesOptions = setRandomIndicesOptions(IndicesOptions.strictExpandOpen(), expectedParams); + RemoveIndexLifecyclePolicyRequest req = new RemoveIndexLifecyclePolicyRequest(Arrays.asList(indices), indicesOptions); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, 
expectedParams); + + Request request = IndexLifecycleRequestConverters.removeIndexLifecyclePolicy(req); + assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testStartILM() throws Exception { + StartILMRequest req = new StartILMRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.startILM(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/start")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testStopILM() throws Exception { + StopILMRequest req = new StopILMRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.stopILM(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/stop")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testLifecycleManagementStatus() throws Exception { + LifecycleManagementStatusRequest req = new LifecycleManagementStatusRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, 
expectedParams); + + Request request = IndexLifecycleRequestConverters.lifecycleManagementStatus(req); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/status")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testExplainLifecycle() throws Exception { + ExplainLifecycleRequest req = new ExplainLifecycleRequest(); + String[] indices = rarely() ? null : randomIndicesNames(0, 10); + req.indices(indices); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req, expectedParams); + setRandomIndicesOptions(req::indicesOptions, req::indicesOptions, expectedParams); + + Request request = IndexLifecycleRequestConverters.explainLifecycle(req); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm/explain")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testRetryLifecycle() throws Exception { + String[] indices = randomIndicesNames(1, 10); + RetryLifecyclePolicyRequest req = new RetryLifecyclePolicyRequest(indices); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + Request request = IndexLifecycleRequestConverters.retryLifecycle(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? 
"" : (idxString + "/")) + "_ilm/retry")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 32b07316f6a9e..baead5d04b61f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -1610,6 +1610,24 @@ static void setRandomIndicesOptions(Consumer setter, Supplier expectedParams) { + if (randomBoolean()) { + indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + } + expectedParams.put("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable())); + expectedParams.put("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices())); + if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { + expectedParams.put("expand_wildcards", "open,closed"); + } else if (indicesOptions.expandWildcardsOpen()) { + expectedParams.put("expand_wildcards", "open"); + } else if (indicesOptions.expandWildcardsClosed()) { + expectedParams.put("expand_wildcards", "closed"); + } else { + expectedParams.put("expand_wildcards", "none"); + } + return indicesOptions; + } + static void setRandomIncludeDefaults(GetIndexRequest request, Map expectedParams) { if (randomBoolean()) { boolean includeDefaults = randomBoolean(); @@ -1660,6 +1678,17 @@ static void setRandomTimeout(Consumer setter, TimeValue defaultTimeout, } } + static void setRandomTimeoutTimeValue(Consumer setter, TimeValue defaultTimeout, + Map expectedParams) { + if (randomBoolean()) { + TimeValue timeout = TimeValue.parseTimeValue(randomTimeValue(), "random_timeout"); + setter.accept(timeout); + expectedParams.put("timeout", timeout.getStringRep()); + } else { + 
expectedParams.put("timeout", defaultTimeout.getStringRep()); + } + } + static void setRandomMasterTimeout(MasterNodeRequest request, Map expectedParams) { setRandomMasterTimeout(request::masterNodeTimeout, expectedParams); } @@ -1680,6 +1709,16 @@ static void setRandomMasterTimeout(Consumer setter, Map } } + static void setRandomMasterTimeout(Consumer setter, TimeValue defaultTimeout, Map expectedParams) { + if (randomBoolean()) { + TimeValue masterTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout"); + setter.accept(masterTimeout); + expectedParams.put("master_timeout", masterTimeout.getStringRep()); + } else { + expectedParams.put("master_timeout", defaultTimeout.getStringRep()); + } + } + static void setRandomWaitForActiveShards(Consumer setter, Map expectedParams) { setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 6abd89db5dd91..632fa48d132ab 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -50,6 +49,13 @@ import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.indexlifecycle.AllocateAction; +import org.elasticsearch.client.indexlifecycle.DeleteAction; +import org.elasticsearch.client.indexlifecycle.ForceMergeAction; +import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import 
org.elasticsearch.client.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; @@ -619,7 +625,7 @@ public void testDefaultNamedXContents() { public void testProvidedNamedXContents() { List namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(10, namedXContents.size()); + assertEquals(16, namedXContents.size()); Map, Integer> categories = new HashMap<>(); List names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -629,7 +635,7 @@ public void testProvidedNamedXContents() { categories.put(namedXContent.categoryClass, counter + 1); } } - assertEquals(3, categories.size()); + assertEquals(4, categories.size()); assertEquals(Integer.valueOf(2), categories.get(Aggregation.class)); assertTrue(names.contains(ChildrenAggregationBuilder.NAME)); assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME)); @@ -643,6 +649,13 @@ public void testProvidedNamedXContents() { assertTrue(names.contains(MeanReciprocalRank.NAME)); assertTrue(names.contains(DiscountedCumulativeGain.NAME)); assertTrue(names.contains(ExpectedReciprocalRank.NAME)); + assertEquals(Integer.valueOf(6), categories.get(LifecycleAction.class)); + assertTrue(names.contains(AllocateAction.NAME)); + assertTrue(names.contains(DeleteAction.NAME)); + assertTrue(names.contains(ForceMergeAction.NAME)); + assertTrue(names.contains(ReadOnlyAction.NAME)); + assertTrue(names.contains(RolloverAction.NAME)); + assertTrue(names.contains(ShrinkAction.NAME)); } public void testMethodWithHeadersArgumentAreDeprecated() { @@ -789,7 +802,8 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("watcher.") == false && apiName.startsWith("graph.") == false && apiName.startsWith("migration.") 
== false && - apiName.startsWith("security.") == false) { + apiName.startsWith("security.") == false && + apiName.startsWith("index_lifecycle.") == false) { apiNotFound.add(apiName); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java new file mode 100644 index 0000000000000..8024aa0188598 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +public class TimedRequestTests extends ESTestCase { + + public void testDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + assertEquals(timedRequest.timeout(), TimedRequest.DEFAULT_ACK_TIMEOUT); + assertEquals(timedRequest.masterNodeTimeout(), TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT); + } + + public void testNonDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); + TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0,1000)); + timedRequest.setTimeout(timeout); + timedRequest.setMasterTimeout(masterTimeout); + assertEquals(timedRequest.timeout(), timeout); + assertEquals(timedRequest.masterNodeTimeout(), masterTimeout); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java new file mode 100644 index 0000000000000..e44eb0da0e188 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class AllocateActionTests extends AbstractXContentTestCase { + + @Override + protected AllocateAction createTestInstance() { + return randomInstance(); + } + + static AllocateAction randomInstance() { + boolean hasAtLeastOneMap = false; + Map includes; + if (randomBoolean()) { + includes = randomMap(1, 100); + hasAtLeastOneMap = true; + } else { + includes = randomBoolean() ? null : Collections.emptyMap(); + } + Map excludes; + if (randomBoolean()) { + hasAtLeastOneMap = true; + excludes = randomMap(1, 100); + } else { + excludes = randomBoolean() ? null : Collections.emptyMap(); + } + Map requires; + if (hasAtLeastOneMap == false || randomBoolean()) { + requires = randomMap(1, 100); + } else { + requires = randomBoolean() ? null : Collections.emptyMap(); + } + Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); + return new AllocateAction(numberOfReplicas, includes, excludes, requires); + } + + @Override + protected AllocateAction doParseInstance(XContentParser parser) { + return AllocateAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testAllMapsNullOrEmpty() { + Map include = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? 
null : Collections.emptyMap(); + Map require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(null, include, exclude, require)); + assertEquals("At least one of " + AllocateAction.INCLUDE_FIELD.getPreferredName() + ", " + + AllocateAction.EXCLUDE_FIELD.getPreferredName() + " or " + AllocateAction.REQUIRE_FIELD.getPreferredName() + + "must contain attributes for action " + AllocateAction.NAME, exception.getMessage()); + } + + public void testInvalidNumberOfReplicas() { + Map include = randomMap(1, 5); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(randomIntBetween(-1000, -1), include, exclude, require)); + assertEquals("[" + AllocateAction.NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0", exception.getMessage()); + } + + public static Map randomMap(int minEntries, int maxEntries) { + Map map = new HashMap<>(); + int numIncludes = randomIntBetween(minEntries, maxEntries); + for (int i = 0; i < numIncludes; i++) { + map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + return map; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java new file mode 100644 index 0000000000000..fb7deb97a2787 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class DeleteActionTests extends AbstractXContentTestCase { + + @Override + protected DeleteAction createTestInstance() { + return new DeleteAction(); + } + + @Override + protected DeleteAction doParseInstance(XContentParser parser) { + return DeleteAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..01f6288d81d4b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class DeleteLifecyclePolicyRequestTests extends ESTestCase { + + private DeleteLifecyclePolicyRequest createTestInstance() { + return new DeleteLifecyclePolicyRequest(randomAlphaOfLengthBetween(2, 20)); + } + + public void testValidate() { + DeleteLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + + } + + public void testValidationFailure() { + expectThrows(IllegalArgumentException.class, () -> new DeleteLifecyclePolicyRequest(randomFrom("", null))); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java new file mode 100644 index 0000000000000..1106394333967 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.Arrays; + +public class ExplainLifecycleRequestTests extends ESTestCase { + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copy, this::mutateInstance); + } + + private ExplainLifecycleRequest createTestInstance() { + ExplainLifecycleRequest request = new ExplainLifecycleRequest(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false, true)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + private ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + 
default: + throw new AssertionError("Illegal randomisation branch"); + } + ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + private ExplainLifecycleRequest copy(ExplainLifecycleRequest original) { + ExplainLifecycleRequest copy = new ExplainLifecycleRequest(); + copy.indices(original.indices()); + copy.indicesOptions(original.indicesOptions()); + return copy; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java new file mode 100644 index 0000000000000..26eacb04b024f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ExplainLifecycleResponseTests extends AbstractXContentTestCase { + + @Override + protected ExplainLifecycleResponse createTestInstance() { + Map indexResponses = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 2); i++) { + IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse(); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + return new ExplainLifecycleResponse(indexResponses); + } + + @Override + protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException { + return ExplainLifecycleResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java new file mode 100644 index 0000000000000..16fafcfa24015 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + 
* license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ForceMergeActionTests extends AbstractXContentTestCase { + + @Override + protected ForceMergeAction doParseInstance(XContentParser parser) { + return ForceMergeAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ForceMergeAction createTestInstance() { + return randomInstance(); + } + + static ForceMergeAction randomInstance() { + return new ForceMergeAction(randomIntBetween(1, 100)); + } + + public void testMissingMaxNumSegments() throws IOException { + BytesReference emptyObject = BytesReference.bytes(JsonXContent.contentBuilder().startObject().endObject()); + XContentParser parser = XContentHelper.createParser(null, 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + emptyObject, XContentType.JSON); + Exception e = expectThrows(IllegalArgumentException.class, () -> ForceMergeAction.parse(parser)); + assertThat(e.getMessage(), equalTo("Required [max_num_segments]")); + } + + public void testInvalidNegativeSegmentNumber() { + Exception r = expectThrows(IllegalArgumentException.class, () -> new ForceMergeAction(randomIntBetween(-10, 0))); + assertThat(r.getMessage(), equalTo("[max_num_segments] must be a positive integer")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..06d28207ce93a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class GetLifecyclePolicyRequestTests extends ESTestCase { + + private GetLifecyclePolicyRequest createTestInstance() { + int numPolicies = randomIntBetween(0, 10); + String[] policyNames = new String[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(2, 5); + } + return new GetLifecyclePolicyRequest(policyNames); + } + + public void testValidation() { + GetLifecyclePolicyRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testNullPolicyNameShouldFail() { + expectThrows(IllegalArgumentException.class, + () -> new GetLifecyclePolicyRequest(randomAlphaOfLengthBetween(2,20), null, randomAlphaOfLengthBetween(2,20))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..89dfbb8635332 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class GetLifecyclePolicyResponseTests extends AbstractXContentTestCase { + + @Override + protected GetLifecyclePolicyResponse createTestInstance() { + int numPolicies = randomIntBetween(1, 10); + ImmutableOpenMap.Builder policies = ImmutableOpenMap.builder(); + for (int i = 0; i < numPolicies; i++) { + String policyName = "policy-" + randomAlphaOfLengthBetween(2, 5); + LifecyclePolicy policy = createRandomPolicy(policyName); + policies.put(policyName, new LifecyclePolicyMetadata(policy, randomLong(), randomLong())); + } + return new GetLifecyclePolicyResponse(policies.build()); + } + + @Override + protected GetLifecyclePolicyResponse doParseInstance(XContentParser parser) throws IOException { + return GetLifecyclePolicyResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new 
NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java new file mode 100644 index 0000000000000..fb7e73ee62191 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java @@ -0,0 +1,122 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class IndexExplainResponseTests extends AbstractXContentTestCase { + + static IndexLifecycleExplainResponse randomIndexExplainResponse() { + if (frequently()) { + return randomManagedIndexExplainResponse(); + } else { + return randomUnmanagedIndexExplainResponse(); + } + } + + private static IndexLifecycleExplainResponse randomUnmanagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(randomAlphaOfLength(10)); + } + + private static IndexLifecycleExplainResponse randomManagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newManagedIndexResponse(randomAlphaOfLength(10), randomAlphaOfLength(10), + randomNonNegativeLong(), randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), + randomBoolean() ? 
null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + } + + @Override + protected IndexLifecycleExplainResponse createTestInstance() { + return randomIndexExplainResponse(); + } + + @Override + protected IndexLifecycleExplainResponse doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleExplainResponse.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + private static class RandomStepInfo implements ToXContentObject { + + private final String key; + private final String value; + + RandomStepInfo(Supplier randomStringSupplier) { + this.key = randomStringSupplier.get(); + this.value = randomStringSupplier.get(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(key, value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RandomStepInfo other = (RandomStepInfo) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java new file mode 100644 index 
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.indexlifecycle;

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.CoreMatchers;

import java.io.IOException;
import java.util.EnumSet;
import java.util.stream.Collectors;

/**
 * Tests parsing of {@link LifecycleManagementStatusResponse} from the
 * {@code {"operation_mode": ...}} JSON body.
 */
public class LifecycleManagementStatusResponseTests extends ESTestCase {

    public void testAllValidStatuses() {
        EnumSet.allOf(OperationMode.class)
            .forEach(e -> assertEquals(new LifecycleManagementStatusResponse(e.name()).getOperationMode(), e));
    }

    public void testXContent() throws IOException {
        XContentType xContentType = XContentType.JSON;
        String mode = randomFrom(EnumSet.allOf(OperationMode.class)
            .stream().map(Enum::name).collect(Collectors.toList()));
        // Close the parser once the response has been read from it.
        try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}")) {
            assertEquals(LifecycleManagementStatusResponse.fromXContent(parser).getOperationMode(), OperationMode.fromString(mode));
        }
    }

    public void testXContentInvalid() throws IOException {
        XContentType xContentType = XContentType.JSON;
        String mode = randomAlphaOfLength(10);
        try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}")) {
            Exception e = expectThrows(IllegalArgumentException.class, () -> LifecycleManagementStatusResponse.fromXContent(parser));
            assertThat(e.getMessage(), CoreMatchers.containsString("failed to parse field [operation_mode]"));
        }
    }
}
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class LifecyclePolicyMetadataTests extends AbstractXContentTestCase { + + private String policyName; + + @Override + protected LifecyclePolicyMetadata createTestInstance() { + policyName = randomAlphaOfLengthBetween(5,20); + LifecyclePolicy policy = createRandomPolicy(policyName); + return new LifecyclePolicyMetadata(policy, randomLong(), randomLong()); + } + + @Override + protected LifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException { + return LifecyclePolicyMetadata.parse(parser, policyName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new 
ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java new file mode 100644 index 0000000000000..024cb13d8df37 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java @@ -0,0 +1,243 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class LifecyclePolicyTests extends AbstractXContentTestCase { + private static final Set VALID_HOT_ACTIONS = Sets.newHashSet(RolloverAction.NAME); + private static final Set VALID_WARM_ACTIONS = Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME, + ReadOnlyAction.NAME, ShrinkAction.NAME); + private static final Set VALID_COLD_ACTIONS = Sets.newHashSet(AllocateAction.NAME); + private static final Set VALID_DELETE_ACTIONS = Sets.newHashSet(DeleteAction.NAME); + + private String lifecycleName; + + @Override + protected LifecyclePolicy doParseInstance(XContentParser parser) { + return LifecyclePolicy.parse(parser, lifecycleName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + 
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + @Override + protected LifecyclePolicy createTestInstance() { + lifecycleName = randomAlphaOfLength(5); + return createRandomPolicy(lifecycleName); + } + + public void testValidatePhases() { + boolean invalid = randomBoolean(); + String phaseName = randomFrom("hot", "warm", "cold", "delete"); + if (invalid) { + phaseName += randomAlphaOfLength(5); + } + Map phases = Collections.singletonMap(phaseName, + new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + if (invalid) { + Exception e = expectThrows(IllegalArgumentException.class, () -> new LifecyclePolicy(lifecycleName, phases)); + assertThat(e.getMessage(), equalTo("Lifecycle does not support phase [" + phaseName + "]")); + } else { + new LifecyclePolicy(lifecycleName, phases); + } + } + + public void testValidateHotPhase() { + LifecycleAction invalidAction = null; + Map actions = randomSubsetOf(VALID_HOT_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", "forcemerge", "delete", "shrink")); + actions.put(invalidAction.getName(), invalidAction); + } + Map hotPhase = Collections.singletonMap("hot", + new Phase("hot", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, hotPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [hot]")); + } else { + new LifecyclePolicy(lifecycleName, hotPhase); 
+ } + } + + public void testValidateWarmPhase() { + LifecycleAction invalidAction = null; + Map actions = randomSubsetOf(VALID_WARM_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete")); + actions.put(invalidAction.getName(), invalidAction); + } + Map warmPhase = Collections.singletonMap("warm", + new Phase("warm", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, warmPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [warm]")); + } else { + new LifecyclePolicy(lifecycleName, warmPhase); + } + } + + public void testValidateColdPhase() { + LifecycleAction invalidAction = null; + Map actions = randomSubsetOf(VALID_COLD_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); + actions.put(invalidAction.getName(), invalidAction); + } + Map coldPhase = Collections.singletonMap("cold", + new Phase("cold", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, coldPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [cold]")); + } else { + new LifecyclePolicy(lifecycleName, coldPhase); + } + } + + public void testValidateDeletePhase() { + LifecycleAction invalidAction = null; + Map actions = VALID_DELETE_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", 
"rollover", "forcemerge", "shrink")); + actions.put(invalidAction.getName(), invalidAction); + } + Map deletePhase = Collections.singletonMap("delete", + new Phase("delete", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, deletePhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [delete]")); + } else { + new LifecyclePolicy(lifecycleName, deletePhase); + } + } + + public static LifecyclePolicy createRandomPolicy(String lifecycleName) { + List phaseNames = randomSubsetOf(Arrays.asList("hot", "warm", "cold", "delete")); + Map phases = new HashMap<>(phaseNames.size()); + Function> validActions = (phase) -> { + switch (phase) { + case "hot": + return VALID_HOT_ACTIONS; + case "warm": + return VALID_WARM_ACTIONS; + case "cold": + return VALID_COLD_ACTIONS; + case "delete": + return VALID_DELETE_ACTIONS; + default: + throw new IllegalArgumentException("invalid phase [" + phase + "]"); + }}; + Function randomAction = (action) -> { + switch (action) { + case AllocateAction.NAME: + return AllocateActionTests.randomInstance(); + case DeleteAction.NAME: + return new DeleteAction(); + case ForceMergeAction.NAME: + return ForceMergeActionTests.randomInstance(); + case ReadOnlyAction.NAME: + return new ReadOnlyAction(); + case RolloverAction.NAME: + return RolloverActionTests.randomInstance(); + case ShrinkAction.NAME: + return ShrinkActionTests.randomInstance(); + default: + throw new IllegalArgumentException("invalid action [" + action + "]"); + }}; + for (String phase : phaseNames) { + TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + Map actions = new HashMap<>(); + List actionNames = randomSubsetOf(validActions.apply(phase)); + for (String action : actionNames) { + actions.put(action, randomAction.apply(action)); + } + phases.put(phase, 
new Phase(phase, after, actions)); + } + return new LifecyclePolicy(lifecycleName, phases); + } + + private LifecycleAction getTestAction(String actionName) { + switch (actionName) { + case AllocateAction.NAME: + return AllocateActionTests.randomInstance(); + case DeleteAction.NAME: + return new DeleteAction(); + case ForceMergeAction.NAME: + return ForceMergeActionTests.randomInstance(); + case ReadOnlyAction.NAME: + return new ReadOnlyAction(); + case RolloverAction.NAME: + return RolloverActionTests.randomInstance(); + case ShrinkAction.NAME: + return ShrinkActionTests.randomInstance(); + default: + throw new IllegalArgumentException("unsupported phase action [" + actionName + "]"); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java new file mode 100644 index 0000000000000..27651ba4a8c41 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.util.EnumSet; + +public class OperationModeTests extends ESTestCase { + + public void testIsValidChange() { + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.RUNNING)); + assertTrue(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPING.isValidChange(OperationMode.STOPPING)); + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPED)); + } + + public void testFromName() { + EnumSet.allOf(OperationMode.class).forEach(e -> assertEquals(OperationMode.fromString(e.name()), e)); + } + + public void testFromNameInvalid() { + String invalidName = randomAlphaOfLength(10); + Exception e = expectThrows(IllegalArgumentException.class, () -> OperationMode.fromString(invalidName)); + assertThat(e.getMessage(), CoreMatchers.containsString(invalidName + " is not a valid operation_mode")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java new file mode 100644 index 0000000000000..0db9b56aea93c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PhaseExecutionInfoTests extends AbstractXContentTestCase { + + static PhaseExecutionInfo randomPhaseExecutionInfo(String phaseName) { + return new PhaseExecutionInfo(randomAlphaOfLength(5), PhaseTests.randomPhase(phaseName), + randomNonNegativeLong(), randomNonNegativeLong()); + } + + String phaseName; + + @Before + public void setupPhaseName() { + phaseName = randomAlphaOfLength(7); + } + + @Override + protected PhaseExecutionInfo createTestInstance() { + return randomPhaseExecutionInfo(phaseName); + } + + @Override + protected PhaseExecutionInfo doParseInstance(XContentParser parser) throws IOException { + return PhaseExecutionInfo.parse(parser, phaseName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List 
entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java new file mode 100644 index 0000000000000..3b4fc2fec6059 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class PhaseTests extends AbstractXContentTestCase { + private String phaseName; + + @Before + public void setup() { + phaseName = randomAlphaOfLength(20); + } + + @Override + protected Phase createTestInstance() { + return randomPhase(phaseName); + } + + static Phase randomPhase(String phaseName) { + TimeValue after = null; + if (randomBoolean()) { + after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + } + Map actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + } + return new Phase(phaseName, after, actions); + } + + @Override + protected Phase doParseInstance(XContentParser parser) { + return Phase.parse(parser, phaseName); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testDefaultAfter() { + Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + assertEquals(TimeValue.ZERO, phase.getMinimumAge()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..26cfe1946ac4d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class PutLifecyclePolicyRequestTests extends ESTestCase { + + private PutLifecyclePolicyRequest createTestInstance() { + return new PutLifecyclePolicyRequest(createRandomPolicy(randomAlphaOfLengthBetween(5, 20))); + } + + public void testValidation() { + PutLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + } + + public void testNullPolicy() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(null)); + } + + public void testNullPolicyName() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(createRandomPolicy(randomFrom("", null)))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java new file mode 100644 index 0000000000000..bf57478425cc9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class ReadOnlyActionTests extends AbstractXContentTestCase { + + @Override + protected ReadOnlyAction doParseInstance(XContentParser parser) { + return ReadOnlyAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ReadOnlyAction createTestInstance() { + return new ReadOnlyAction(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..532688c475115 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; + +public class RemoveIndexLifecyclePolicyRequestTests extends ESTestCase { + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(null)); + } + + public void testNullIndicesOptions() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(Collections.emptyList(), null)); + } + + public void testValidate() { + RemoveIndexLifecyclePolicyRequest request = new RemoveIndexLifecyclePolicyRequest(Collections.emptyList()); + assertFalse(request.validate().isPresent()); + } + + protected RemoveIndexLifecyclePolicyRequest createInstance() { + if (randomBoolean()) { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false)), + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } else { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false))); + } + } + + private RemoveIndexLifecyclePolicyRequest copyInstance(RemoveIndexLifecyclePolicyRequest req) { + return new RemoveIndexLifecyclePolicyRequest(new ArrayList<>(req.indices()), IndicesOptions.fromOptions( + req.indicesOptions().ignoreUnavailable(), req.indicesOptions().allowNoIndices(), + req.indicesOptions().expandWildcardsOpen(), req.indicesOptions().expandWildcardsClosed(), + req.indicesOptions().allowAliasesToMultipleIndices(), req.indicesOptions().forbidClosedIndices(), + req.indicesOptions().ignoreAliases())); + } + + private RemoveIndexLifecyclePolicyRequest mutateInstance(RemoveIndexLifecyclePolicyRequest req) { + if (randomBoolean()) 
{ + return new RemoveIndexLifecyclePolicyRequest(req.indices(), + randomValueOtherThan(req.indicesOptions(), () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))); + } else { + return new RemoveIndexLifecyclePolicyRequest( + randomValueOtherThan(req.indices(), () -> Arrays.asList(generateRandomStringArray(20, 20, false))), + req.indicesOptions()); + } + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..1f99a2dfdfac4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class RemoveIndexLifecyclePolicyResponseTests extends ESTestCase { + + private void toXContent(RemoveIndexLifecyclePolicyResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(RemoveIndexLifecyclePolicyResponse.HAS_FAILURES_FIELD.getPreferredName(), response.hasFailures()); + builder.field(RemoveIndexLifecyclePolicyResponse.FAILED_INDEXES_FIELD.getPreferredName(), response.getFailedIndexes()); + builder.endObject(); + } + + private RemoveIndexLifecyclePolicyResponse createInstance() { + List failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + return new RemoveIndexLifecyclePolicyResponse(failedIndexes); + } + + private RemoveIndexLifecyclePolicyResponse copyInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(new ArrayList<>(req.getFailedIndexes())); + } + + private RemoveIndexLifecyclePolicyResponse mutateInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(randomValueOtherThan(req.getFailedIndexes(), + () -> Arrays.asList(generateRandomStringArray(20, 20, false)))); + } + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createInstance, + this::toXContent, + RemoveIndexLifecyclePolicyResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + public void testNullFailedIndices() { + IllegalArgumentException exception = + expectThrows(IllegalArgumentException.class, () -> new 
RemoveIndexLifecyclePolicyResponse(null)); + assertEquals("failed_indexes cannot be null", exception.getMessage()); + } + + public void testHasFailures() { + RemoveIndexLifecyclePolicyResponse response = new RemoveIndexLifecyclePolicyResponse(new ArrayList<>()); + assertFalse(response.hasFailures()); + assertEquals(Collections.emptyList(), response.getFailedIndexes()); + + int size = randomIntBetween(1, 10); + List failedIndexes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + failedIndexes.add(randomAlphaOfLength(20)); + } + response = new RemoveIndexLifecyclePolicyResponse(failedIndexes); + assertTrue(response.hasFailures()); + assertEquals(failedIndexes, response.getFailedIndexes()); + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java new file mode 100644 index 0000000000000..bbbdba37e5640 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class RolloverActionTests extends AbstractXContentTestCase { + + @Override + protected RolloverAction doParseInstance(XContentParser parser) { + return RolloverAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected RolloverAction createTestInstance() { + return randomInstance(); + } + + static RolloverAction randomInstance() { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? 
TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + public void testNoConditions() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new RolloverAction(null, null, null)); + assertEquals("At least one rollover condition must be set.", exception.getMessage()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java new file mode 100644 index 0000000000000..adeec1ff825a9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrinkActionTests extends AbstractXContentTestCase { + + @Override + protected ShrinkAction doParseInstance(XContentParser parser) throws IOException { + return ShrinkAction.parse(parser); + } + + @Override + protected ShrinkAction createTestInstance() { + return randomInstance(); + } + + static ShrinkAction randomInstance() { + return new ShrinkAction(randomIntBetween(1, 100)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testNonPositiveShardNumber() { + Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0))); + assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java new file mode 100644 index 0000000000000..449ef7d1678eb --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StartILMRequestTests extends ESTestCase { + + protected StartILMRequest createTestInstance() { + return new StartILMRequest(); + } + + public void testValidate() { + StartILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java new file mode 100644 index 0000000000000..f1618f3f0f0e3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StopILMRequestTests extends ESTestCase { + + protected StopILMRequest createTestInstance() { + return new StopILMRequest(); + } + + public void testValidate() { + StopILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc new file mode 100644 index 0000000000000..3c8b6c397c07f --- /dev/null +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-delete-lifecycle]] +=== Delete Lifecycle Policy API +++++ +Delete Policy +++++ + +Deletes an existing lifecycle policy + +==== Request + +`DELETE _ilm/policy/<policy_id>` + +==== Description + +Deletes an existing lifecycle policy + +==== Path Parameters + +`policy` (required):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + DELETE operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <<time-units>>. 
+ +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <<time-units>>. + + +==== Examples + +The following example deletes an existing policy named `my_policy`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +DELETE _ilm/policy/my_policy +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc new file mode 100644 index 0000000000000..899f30d9c6c7f --- /dev/null +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -0,0 +1,284 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-explain]] +=== Explain Lifecycle API +++++ +Explain Lifecycle +++++ + +Shows the current lifecycle status for an index. + +==== Request + +`GET <index>/_ilm/explain` + +==== Description + +This API returns information relating to the current lifecycle state of an +index. This includes information such as the currently executing phase, action, +and step and the timestamp when the index entered them. 
It also shows the +definition of the current phase that is being run and in the event that there +has been a failure, information regarding the failure. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <<time-units>>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <<time-units>>. + + +==== Examples + +The following example retrieves the lifecycle state for the index `my_index`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy", + "index.number_of_replicas": 0 + } +} + +GET /_cluster/health?wait_for_status=green&timeout=10s +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +When the index is first taken over by ILM you will see a response like the following: + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, <1> + "policy": "my_policy", <2> + "lifecycle_date": 1538475653281, <3> + "phase": "new", <4> + 
"phase_time": 1538475653317, <5> + "action": "complete", <6> + "action_time": 1538475653317, <7> + "step": "complete", <8> + "step_time": 1538475653317 <9> + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date": 1538475653281/"lifecycle_date": $body.indices.my_index.lifecycle_date/] +// TESTRESPONSE[s/"phase_time": 1538475653317/"phase_time": $body.indices.my_index.phase_time/] +// TESTRESPONSE[s/"action_time": 1538475653317/"action_time": $body.indices.my_index.action_time/] +// TESTRESPONSE[s/"step_time": 1538475653317/"step_time": $body.indices.my_index.step_time/] +<1> Shows if the index is being managed by ILM. If the index is not managed by +ILM the other fields will not be shown +<2> The name of the policy which ILM is using for this index +<3> The timestamp used for the `min_age` +<4> The current phase +<5> The timestamp for when the index entered the current phase +<6> The current action +<7> The timestamp for when the index entered the current action +<8> The current step +<9> The timestamp for when the index entered the current step + +When the policy is running on the index the response will contain a +`phase_execution` object that describes the exact phase that is being run. +Changes to the underlying policy will not affect this index until the current +phase definition has been completely executed. 
+ +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000069": { + "index": "test-000069", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date": "2018-10-15T13:45:21.981Z", + "phase": "hot", + "phase_time": "2018-10-15T13:45:22.577Z", + "action": "rollover", + "action_time": "2018-10-15T13:45:22.577Z", + "step": "attempt_rollover", + "step_time": "2018-10-15T13:45:22.577Z", + "phase_execution": { <1> + "policy": "my_lifecycle3", <2> + "phase_definition": { <3> + "min_age": "0ms", + "actions": { + "rollover": { + "max_age": "30s" + } + } + }, + "version": 3, <4> + "modified_date": "2018-10-15T13:21:41.576Z", <5> + "modified_date_in_millis": 1539609701576 <6> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> The phase execution information for this index in its current phase +<2> The policy that this phase definition was loaded from +<3> The phase definition itself. 
This is the JSON for the phase loaded from the +policy at the time the index entered the current phase +<4> The version of the policy at the time the phase definition was loaded +<5> The last modified date of the policy at the time the phase definition was loaded +<6> The last modified epoch time of the policy at the time the phase definition was loaded + + +If the policy is waiting for a step to complete for the index, the response will contain step information such as: + +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000020": { + "index": "test-000020", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date": "2018-10-15T13:20:28.042Z", + "phase": "warm", + "phase_time": "2018-10-15T13:20:28.428Z", + "action": "allocate", + "action_time": "2018-10-15T13:20:28.428Z", + "step": "check-allocation", + "step_time": "2018-10-15T13:20:28.633Z", + "step_info": { <1> + "message": "Waiting for all shard copies to be active", + "shards_left_to_allocate": -1, + "all_shards_active": false, + "actual_replicas": 2 + }, + "phase_execution": { + "policy": "my_lifecycle3", + "phase_definition": { + "min_age": "0ms", + "actions": { + "allocate": { + "number_of_replicas": 2, + "include": { + "box_type": "warm" + }, + "exclude": {}, + "require": {} + }, + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "version": 2, + "modified_date": "2018-10-15T13:20:02.489Z", + "modified_date_in_millis": 1539609602489 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> `step_info` shows information about what ILM is waiting for on this index. +In this case we are waiting for all shard copies of the index to be active. + +If the index is in the ERROR step, something has gone wrong when executing a +step in the policy and will need to be investigated and resolved for the index +to make progress. 
To help determine how to resolve the error the explain response
+will show the step that failed in `failed_step`, and the information on the error
+that occurred in `step_info`.
+
+[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "test-000056": {
+      "index": "test-000056",
+      "managed": true,
+      "policy": "my_lifecycle3",
+      "lifecycle_date": "2018-10-15T13:38:26.209Z",
+      "phase": "hot",
+      "phase_time": "2018-10-15T13:38:26.706Z",
+      "action": "rollover",
+      "action_time": "2018-10-15T13:38:26.706Z",
+      "step": "ERROR",
+      "step_time": "2018-10-15T13:39:15.304Z",
+      "failed_step": "attempt_rollover", <1>
+      "step_info": { <2>
+        "type": "resource_already_exists_exception",
+        "reason": "index [test-000057/H7lF9n36Rzqa-KfKcnGQMg] already exists",
+        "index_uuid": "H7lF9n36Rzqa-KfKcnGQMg",
+        "index": "test-000057"
+      },
+      "phase_execution": {
+        "policy": "my_lifecycle3",
+        "phase_definition": {
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_age": "30s"
+            }
+          }
+        },
+        "version": 3,
+        "modified_date": "2018-10-15T13:21:41.576Z",
+        "modified_date_in_millis": 1539609701576
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test]
+<1> The step that caused an error
+<2> Information on the error that occurred.
In this case the next index already +existed when the rollover operation was performed diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc new file mode 100644 index 0000000000000..dbc8a572903b3 --- /dev/null +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -0,0 +1,115 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-get-lifecycle]] +=== Get Lifecycle Policy API +++++ +Get Policy +++++ + +Retrieves an existing policy + +==== Request + +`GET _ilm/policy` +`GET _ilm/policy/` + +==== Description + +This API returns a policy definition along with some of its metadata like +its last modified date and version. If no path parameters are provided, then +all the policies defined will be returned. + +==== Path Parameters + +`policy` (optional):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. 
+ + +==== Examples + +The following example retrieves the policy named `my_policy`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET _ilm/policy +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "my_policy": { + "version": 1, <1> + "modified_date": 82392349, <2> + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/] +<1> The version of the policy. This is increased whenever the policy is updated +<2> The timestamp when this policy was last modified diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc new file mode 100644 index 0000000000000..8f5d2289ff2ea --- /dev/null +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -0,0 +1,55 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-get-status]] +=== Get ILM Status API +++++ +Get ILM Status +++++ + +Gets the current status for ILM. + +==== Request + +`GET /_ilm/status` + +==== Description + +This API will return the current status of the ILM plugin. 
The response contains
+an `operation_mode` field which shows whether the ILM plugin is `RUNNING`, `STOPPING`
+or `STOPPED`. This `operation_mode` is controlled by the <>
+and <> APIs.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  get operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example retrieves the current status of the ILM plugin.
+
+[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc
new file mode 100644
index 0000000000000..1139990edf62c
--- /dev/null
+++ b/docs/reference/ilm/apis/ilm-api.asciidoc
@@ -0,0 +1,44 @@
+[[index-lifecycle-management-api]]
+== Index Lifecycle Management API
+
+You can use the following APIs to manage policies on indices.
+
+[float]
+[[ilm-api-policy-endpoint]]
+=== Policy Management APIs
+
+* <>
+* <>
+* <>
+
+[float]
+[[ilm-api-index-endpoint]]
+=== Index Management APIs
+
+* <>
+* <>
+* <>
+
+[float]
+[[ilm-api-management-endpoint]]
+=== Operation Management APIs
+
+* <>
+* <>
+* <>
+* <>
+
+
+include::put-lifecycle.asciidoc[]
+include::get-lifecycle.asciidoc[]
+include::delete-lifecycle.asciidoc[]
+
+include::move-to-step.asciidoc[]
+include::set-policy.asciidoc[]
+include::remove-policy.asciidoc[]
+include::retry-policy.asciidoc[]
+
+include::get-status.asciidoc[]
+include::explain.asciidoc[]
+include::start.asciidoc[]
+include::stop.asciidoc[]
diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc
new file mode 100644
index 0000000000000..c34b800856c10
--- /dev/null
+++ b/docs/reference/ilm/apis/move-to-step.asciidoc
@@ -0,0 +1,121 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-move-to-step]]
+=== Move To Step API
+++++
+Move To Step
+++++
+
+Moves a managed index into a specific execution step of its policy
+
+==== Request
+
+`POST _ilm/move/`
+
+==== Description
+
+WARNING: This is an expert API that may lead to unintended data loss. When used,
+an index's policy will begin executing at the specified step. It will execute
+the step specified even if it has already executed it. Since this is a potentially
+dangerous action, specifying both the current step and next step to move to is
+required in the body of the request.
+
+This API changes the current step for the specified index to the step supplied in the body of the request
+
+==== Path Parameters
+
+`index` (required)::
+  (string) Identifier for the index.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  move operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example moves the index `my_index` from the initial step to the
+forcemerge step:
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST _ilm/move/my_index
+{
+  "current_step": { <1>
+    "phase": "new",
+    "action": "complete",
+    "name": "complete"
+  },
+  "next_step": { <2>
+    "phase": "warm",
+    "action": "forcemerge",
+    "name": "forcemerge"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> The step that the index is currently expected to be executing
+<2> The step that the index should move to when executing this request
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
+
+NOTE: An error will be returned if the index is no longer executing the step
+specified in `current_step`. This is so the index is not moved from an
+unexpected step into the `next_step`.
diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc new file mode 100644 index 0000000000000..36650078db652 --- /dev/null +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -0,0 +1,82 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-put-lifecycle]] +=== Put Lifecycle Policy API +++++ +Put Policy +++++ + +Creates or updates an ILM Policy + +==== Request + +`PUT _ilm/policy/` + +==== Description + +This API creates a new Lifecycle Policy, or updates an existing one with the same +identifier. Each call will replace the existing policy and increment the `version` +associated with the policy. + +NOTE: The `version` is only for informational purposes. Only the latest version +of the policy is stored. + +==== Path Parameters + +`policy` (required):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + PUT operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. 
+ + +==== Examples + +The following example creates a new policy named `my_policy`: + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +If the request does not encounter errors, you receive the following result: +[source,js] +---- +{ + "acknowledged": true +} +---- +// CONSOLE +// TESTRESPONSE diff --git a/docs/reference/ilm/apis/remove-policy.asciidoc b/docs/reference/ilm/apis/remove-policy.asciidoc new file mode 100644 index 0000000000000..8ee313f4e30c3 --- /dev/null +++ b/docs/reference/ilm/apis/remove-policy.asciidoc @@ -0,0 +1,98 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-remove-policy]] +=== Remove Policy On Index API +++++ +Remove Policy From Index +++++ + +Unassigns a policy from a specified index pattern + +==== Request + +`DELETE /_ilm` + +==== Description + +This action removes a policy from managing an index. It is effectively the same as setting an index's +`index.lifecycle.name` setting to null. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example removes a policy `my_policy` from an index `my_index`. 
+ +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy" + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +DELETE my_index/_ilm +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "has_failures" : false, + "failed_indexes" : [] +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc new file mode 100644 index 0000000000000..7c81f9423ef12 --- /dev/null +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-retry-policy]] +=== Retry Policy Execution API +++++ +Retry Policy Execution +++++ + +Retry executing the policy for an index which has errored. + +==== Request + +`POST /_ilm/retry` + +==== Description + +This API will re-run a policy is currently in the ERROR step. It will set the +policy back to the step where the error occurred and attempt to re-execute it. +Information on whether an index is in the ERROR step can be obtained from the +<> + +==== Path Parameters + +`index` (required):: + (string) Identifier for the indices to retry in comma-separated format. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + retry operation. 
When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example retries the policy for index `my_index`. + +[source,js] +-------------------------------------------------- +POST my_index/_ilm/retry +-------------------------------------------------- +// NOTCONSOLE + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc new file mode 100644 index 0000000000000..073a584e4d872 --- /dev/null +++ b/docs/reference/ilm/apis/start.asciidoc @@ -0,0 +1,90 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-start]] +=== Start ILM API +++++ +Start ILM +++++ + +Start the ILM plugin + +==== Request + +`POST /_ilm/start` + +==== Description + +This API will start the ILM plugin if it is currently stopped. ILM is started +by default when the cluster is formed so this API is only needed if ILM has +been stopped using the <>. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + start operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. 
For more information about time units, see <>. + + +==== Examples + +The following example starts the ILM plugin. + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index + +POST _ilm/stop +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +POST _ilm/start +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc new file mode 100644 index 0000000000000..cdc038adabcfc --- /dev/null +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -0,0 +1,101 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-stop]] +=== Stop ILM API +++++ +Stop ILM +++++ + +Stop the ILM plugin. + +==== Request + +`POST /_ilm/stop` + +==== Description + +This API will stop the ILM plugin. This can be used for period where +maintenance is required and ILM should not perform any actions on any indices. +The API will return as soon as the stop request has been acknowledged but the +plugin may not immediately stop but rather need to wait for some operations +to finish before it's stopped. Progress can be seen using the +<> API. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the response. When this + period of time elapses, the API fails and returns an error. 
The default value + is `30s`. For more information about time units, see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example stops the ILM plugin. + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +POST _ilm/stop +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +////////////////////////// + +[source,js] +-------------------------------------------------- +POST _ilm/start +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +////////////////////////// diff --git a/docs/reference/ilm/get-index-lifecycle-information.asciidoc b/docs/reference/ilm/get-index-lifecycle-information.asciidoc new file mode 100644 index 0000000000000..3d5dc8a172010 --- /dev/null +++ b/docs/reference/ilm/get-index-lifecycle-information.asciidoc @@ -0,0 +1,11 @@ +[role="xpack"] +[[get-index-lifecycle-information]] +== Get index lifecycle information + +Execution Model +Discuss how actions are actually split up into discrete steps and how you can see more 
information about where an index is within a policy (info and all) +Talk about the jump-to-step API +Error Handling +Show error in explain api +Demonstrate the retry API +Show how to get a sense of progress for things like the allocate step diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc new file mode 100644 index 0000000000000..ad3596e92a1ad --- /dev/null +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -0,0 +1,15 @@ +[role="xpack"] +[[getting-started-index-lifecycle-management]] +== Getting started with {ilm} + +Create a policy that rolls over after 1 day deletes an index after 30 days + +Show create policy API req/res + +Show assign policy to index API req/res + +Show both the API and how it is done with `index.lifecyce.name` using the +create-index API + +Show explain API to show current state, but ignore the “step” related info, +only focus on managed/phase/action diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc new file mode 100644 index 0000000000000..d85f92fb1c28a --- /dev/null +++ b/docs/reference/ilm/index.asciidoc @@ -0,0 +1,62 @@ +[role="xpack"] +[testenv="basic"] +[[index-lifecycle-management]] += Managing Indices + +:ilm: index lifecycle management +:ILM: Index lifecycle management +[partintro] +-- +The <> enable you to automate how you +want to manage your indices over time. Rather than simply performing management +actions on your indices on a set schedule, you can base actions on other factors +such as shard size and performance requirements. + +You control how indices are handled as they age by attaching a +lifecycle policy to the index template used to create them. You can update +the policy to modify the lifecycle of both new and existing indices. + +For time series indices, there are four stages in the index lifecycle: + +* Hot--the index is actively being updated and queried. 
+* Warm--the index is no longer being updated, but is still being queried. +* Cold--the index is no longer being updated and is seldom queried. The +information still needs to be searchable, but it's okay if those queries are +slower. +* Delete--the index is no longer needed and can safely be deleted. + +The lifecycle policy governs how the index transitions through these stages and +the actions that are performed on the index at each stage. The policy can +specify: + +* The maximum size or age at which you want to roll over to a new index. +* The point at which the index is no longer being updated and the number of +primary shards can be reduced. +* When to force a merge to permanently delete documents marked for deletion. +* The point at which the index can be moved to less performant hardware. +* The point at which the availability is not as critical and the number of +replicas can be reduced. +* When the index can be safely deleted. + +For example, if you are indexing metrics data from a fleet of ATMs into +Elasticsearch, you might define a policy that says: + +. When the index reaches 5GB, roll over to a new index. +. Move the old index into the warm stage, mark it read only, and shrink it down +to a single shard. +. After 7 days, move the index into the cold stage and move it to less expensive +hardware. +. Delete the index once the required 30 day retention period is reached. 
+-- + +include::getting-started-ilm.asciidoc[] + +include::using-policies-rollover.asciidoc[] + +include::set-up-lifecycle-policy.asciidoc[] + +include::update-lifecycle-policy.asciidoc[] + +include::get-index-lifecycle-information.asciidoc[] + +include::start-stop-ilm.asciidoc[] diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc new file mode 100644 index 0000000000000..7f5bb84c598a4 --- /dev/null +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -0,0 +1,112 @@ +[role="xpack"] +[testenv="basic"] +[[set-up-lifecycle-policy]] +== Set up {ilm} policy + +In order for an index to use an {ilm} policy to manage its lifecycle we must +first define a lifecycle policy for it to use. The following request creates +a policy called `my_policy` in Elasticsearch which we can later use to manage +our indexes. + +[source,js] +------------------------ +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" <1> + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} <2> + } + } + } + } +} +------------------------ +// CONSOLE +<1> Rollover the index when it reaches 25GB in size +<2> Delete the index when its 30 days old + +{ilm} will manage an index using the policy defined in the +`index.lifecycle.name` index setting. If this setting does not exist in the +settings for a particular index {ilm} will not manage that index. + +To set the policy for an index there are two options: +1. Apply the policy to an index template and bootstrap creating the first index +2. 
Apply the policy to a new index in a create index request + +=== Applying a policy to an index template + +The `index.lifecycle.name` setting can be set in an index template so that it +is automatically applied to indexes matching the templates index pattern: + +[source,js] +----------------------- +PUT _template/my_template +{ + "index_patterns": ["test-*"], <1> + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1, + "index.lifecycle.name": "my_policy", <2> + "index.lifecycle.rollover_alias": "test-alias" + } +} +----------------------- +// CONSOLE +<1> This template will be applied to all indexes which have a name starting +with `test-` +<2> The template will set the policy to be used to `my_policy` + +Now that a policy exists and is used in an index template we can create an +initial index which will be managed by our policy: + +[source,js] +----------------------- +PUT test-000001 +{ + "aliases": { + "test-alias":{ + "is_write_index": true <1> + } + } +} +----------------------- +// CONSOLE +<1> Set this initial index to be the write index for this alias. + +We can now write data to the `test-alias` alias. Because we have a rollover +action defined in our policy when the index grows larger than 25GB {ilm} will +create a new index and roll the alias over to use the new index automatically. + +=== Apply a policy to a create index request + +The `index.lifecycle.name` setting can be set on an individual create index +request so {ilm} immediately starts managing the index: + +[source,js] +----------------------- +PUT test-index +{ + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1, + "index.lifecycle.name": "my_policy" + } +} +----------------------- +// CONSOLE + +IMPORTANT: Its recommended not to use the create index API with a policy that +defines a rollover action. If you do so, the new index as the result of the +rollover will not carry forward the policy. Always use index templates to +define policies with rollover actions. 
+ diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc new file mode 100644 index 0000000000000..938b97d44721f --- /dev/null +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -0,0 +1,168 @@ +[role="xpack"] +[testenv="basic"] +[[start-stop-ilm]] +== Start And Stop {ilm} + +All indices that are managed by ILM will continue to execute +their policies. There may be times when this is not desired on certain +indices, or maybe even all the indices in a cluster. For example, +maybe there are scheduled maintenance windows when cluster topology +changes are desired that may impact running ILM actions. For this reason, +ILM has two ways to disable operations. + +Normally, ILM will be running by default. +To see the current operating status of ILM, use the <> +to see the current state of ILM. + +//// +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy" + } +} +-------------------------------------------------- +// CONSOLE +//// + +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "RUNNING" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +The operating modes of ILM: + + +.ILM Operating Modes +|=== +|Name |Description +|RUNNING |Normal operation where all policies are executed as normal +|STOPPING|ILM has received a request to stop but is still processing some policies +|STOPPED |This represents a state where 
no policies are executed +|=== + +=== Stopping ILM + +The ILM service can be paused such that no further steps will be executed +using the <>. + +[source,js] +-------------------------------------------------- +POST _ilm/stop +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +When stopped, all further policy actions will be halted. This will +be reflected in the Status API + +//// +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "STOPPING" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +The ILM service will then, asynchronously, run all policies to a point +where it is safe to stop. After ILM verifies that it is safe, it will +move to the `STOPPED` mode. + +//// +[source,js] +-------------------------------------------------- +PUT trigger_ilm_cs_action + +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "STOPPED" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +=== Starting ILM + +To start ILM and continue executing policies, use the <>. + + +[source,js] +-------------------------------------------------- +POST _ilm/start +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +//// +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +The Start API will send a request to the ILM service to immediately begin +normal operations. 
+There are three scenarios for examining the behavior of updating policies and
+their effects on policy execution on indices.
+Afterwards, any indices set to `my_policy` will execute against version 2 of
+the policy.
+ +[source,js] +------------------------ +PUT _ilm/policy/my_executing_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_docs": 1 + } + } + }, + "delete": { + "min_age": "10d", + "actions": { + "delete": {} + } + } + } + } +} +------------------------ +// CONSOLE + +//// +[source,js] +------------------------ +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_executing_policy" + } +} +------------------------ +// CONSOLE +// TEST[continued] +//// + +The <> is useful to introspect managed indices to see which phase definition they are currently executing. +Using this API, we can find out that `my_index` is currently attempting to be rolled over. + +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date": 1538475653281, + "phase": "hot", + "phase_time": 1538475653317, + "action": "rollover", + "action_time": 1538475653317, + "step": "attempt_rollover", + "step_time": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 1, + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_docs": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date": 1538475653281/"lifecycle_date": $body.indices.my_index.lifecycle_date/] +// TESTRESPONSE[s/"phase_time": 1538475653317/"phase_time": $body.indices.my_index.phase_time/] +// TESTRESPONSE[s/"action_time": 1538475653317/"action_time": $body.indices.my_index.action_time/] +// TESTRESPONSE[s/"step_time": 1538475653317/"step_time": $body.indices.my_index.step_time/] +// 
TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/] + +Updating `my_executing_policy` to have no rollover action and, instead, go directly into a newly introduced `warm` phase. + +[source,js] +------------------------ +PUT _ilm/policy/my_executing_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "10d", + "actions": { + "delete": {} + } + } + } + } +} +------------------------ +// CONSOLE +// TEST[continued] + +Now, version 2 of this policy has no `hot` phase, but if we run the Explain API again, we will see that nothing has changed. +The index `my_index` is still executing version 1 of the policy. + +//// +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date": 1538475653281, + "phase": "hot", + "phase_time": 1538475653317, + "action": "rollover", + "action_time": 1538475653317, + "step": "attempt_rollover", + "step_time": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 1, + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_docs": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date": 1538475653281/"lifecycle_date": $body.indices.my_index.lifecycle_date/] +// TESTRESPONSE[s/"phase_time": 1538475653317/"phase_time": $body.indices.my_index.phase_time/] +// TESTRESPONSE[s/"action_time": 1538475653317/"action_time": $body.indices.my_index.action_time/] +// 
TESTRESPONSE[s/"step_time": 1538475653317/"step_time": $body.indices.my_index.step_time/] +// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/] + +After indexing one document into `my_index` so that rollover succeeds and moves onto the next phase, we will notice something new. The +index will move into the next phase in the updated version 2 of its policy. + +//// +[source,js] +-------------------------------------------------- +PUT my_index/_doc/1 +{ + "foo": "bar" +} + +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date": 1538475653281, + "phase": "warm", + "phase_time": 1538475653317, + "action": "forcemerge", + "action_time": 1538475653317, + "step": "forcemerge", + "step_time": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 2, <1> + "phase_definition": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:There is no way to force the index to move to the next step in a timely manner] +<1> The index has moved to using version 2 of the policy + +`my_index` will move to the next phase in the latest policy definition, which is the newly added `warm` phase. + +=== Switching policies for an index + +Setting `index.lifecycle.name` to a different policy behaves much like a policy update, but instead of just +switching to a different version, it switches to a different policy. 
+ +After setting a policy for an index, we can switch out `my_policy` with +`my_other_policy` by just updating the index's `index.lifecycle.name` +setting to the new policy. After completing its currently executed phase, +it will move on to the next phase in `my_other_policy`. So if it was on the +`hot` phase before, it will move to the `delete` phase after the `hot` phase concluded. + +//// +[source,js] +------------------------ +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" + } + } + }, + "delete": { + "min_age": "10d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT _ilm/policy/my_other_policy +{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy" + } +} +------------------------ +// CONSOLE + +//// + +[source,js] +-------------------------------------------------- +PUT my_index/_settings +{ + "lifecycle.name": "my_other_policy" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The change to the new policy will not happen immediately. The currently executing phase +of the existing policy for `my_index` will continue to execute until it completes. Once +completed, `my_index` will move to being managed by the `my_other_policy`. diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc new file mode 100644 index 0000000000000..f7982af4fec81 --- /dev/null +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -0,0 +1,117 @@ +[role="xpack"] +[testenv="basic"] +[[using-policies-rollover]] +== Using policies to manage index rollover + +The rollover action enables you to automatically roll over to a new index based +on the index size, document count, or age. 
+met. Because the criteria are checked periodically, the index might grow
+slightly beyond the specified threshold. To control how often the criteria are
+checked, specify the `indices.lifecycle.poll_interval` cluster setting.
+
+The rollover action takes the following parameters:
+
+.`rollover` Action Parameters
+|===
+|Name |Description
+|max_size |The maximum estimated size the index is allowed to grow
+to. Defaults to `null`. Optional.
+|max_docs |The maximum number of documents the index should
+contain. Defaults to `null`. Optional.
+|max_age |The maximum age of the index. Defaults to `null`. Optional.
+|===
+<3> Rolls over the write alias `test-alias` when the rollover action is triggered
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 2905688f7be78..f9175af61f639 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -57,6 +57,8 @@ include::index-modules.asciidoc[] include::ingest.asciidoc[] +include::ilm/index.asciidoc[] + include::ccr/index.asciidoc[] include::sql/index.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 5aef27e127500..eedc2dfa1f51f 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -10,6 +10,7 @@ directly to configure and access {xpack} features. * <> * <> * <> +* <> * <> * <> * <> @@ -22,6 +23,7 @@ directly to configure and access {xpack} features. include::info.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] +include::{es-repo-dir}/ilm/apis/ilm-api.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 6d5967d0cdc7d..fc6488010dabd 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -68,6 +68,11 @@ Example response: "available" : true, "enabled" : true }, + "ilm" : { + "description" : "Index lifecycle management for the Elastic Stack", + "available" : true, + "enabled" : true + }, "logstash" : { "description" : "Logstash management component for X-Pack", "available" : true, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index afbc9a554ed5e..1b385ed9d0dbc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -71,6 +71,10 @@ public final String toString() { return "[" + name + ": " + value + "]"; } + public T value() { + return value; + } + /** * Holder for index stats used to evaluate conditions */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index fe5ad65c4799b..f36636594a4d6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -196,7 +196,7 @@ public boolean isDryRun() { return dryRun; } - Map getConditions() { + public Map getConditions() { return conditions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index b7e4294a5635c..356f805c24bd4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -68,8 +68,8 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement RolloverResponse() { } - RolloverResponse(String oldIndex, String newIndex, Map conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { + public RolloverResponse(String oldIndex, String newIndex, Map conditionResults, + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged, shardsAcknowledged); this.oldIndex = oldIndex; this.newIndex = newIndex; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 18c7d506c7275..96804248f62d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -88,7 +88,7 @@ public String[] indices() { return indices; } - Settings settings() { + public Settings settings() { return settings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java index c116bd896c81f..ffe59f1e3ae18 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java @@ -38,7 +38,7 @@ public final class ResizeResponse extends CreateIndexResponse { ResizeResponse() { } - ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { + public ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { super(acknowledged, shardsAcknowledged, index); } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index fc14cd38e5681..c012a9c8215ce 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -35,6 +35,14 @@ public abstract class ClusterInfoRequest taskName.startsWith("xpack/rollup/job") == false); } + private static void deleteAllPolicies() throws IOException { + Map policies; + + try { + Response response = adminClient().performRequest(new Request("GET", "/_ilm/policy")); + policies = entityAsMap(response); + } 
catch (ResponseException e) { + if (RestStatus.METHOD_NOT_ALLOWED.getStatus() == e.getResponse().getStatusLine().getStatusCode()) { + // If bad request returned, ILM is not enabled. + return; + } + throw e; + } + + if (policies == null || policies.isEmpty()) { + return; + } + + for (String policyName : policies.keySet()) { + adminClient().performRequest(new Request("DELETE", "/_ilm/policy/" + policyName)); + } + } + /** * Logs a message if there are still running tasks. The reasoning is that any tasks still running are state the is trying to bleed into * other tests. @@ -698,6 +735,14 @@ protected static void createIndex(String name, Settings settings, String mapping client().performRequest(request); } + protected static void createIndex(String name, Settings settings, String mapping, String aliases) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings) + + ", \"mappings\" : {" + mapping + "}" + + ", \"aliases\": {" + aliases + "} }"); + client().performRequest(request); + } + protected static void deleteIndex(String name) throws IOException { Request request = new Request("DELETE", "/" + name); client().performRequest(request); diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index 861d574b3465e..84f9b4dfd149d 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -66,4 +66,9 @@ protected boolean preserveClusterSettings() { protected boolean preserveRollupJobsUponCompletion() { return true; } + + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } } diff --git a/x-pack/build.gradle b/x-pack/build.gradle index 
0b1406519685b..e1c72c734798e 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -34,6 +34,7 @@ subprojects { ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ilm:${version}": xpackModule('ilm')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-logstash:${version}": xpackModule('logstash')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ml:${version}": xpackModule('ml')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-monitoring:${version}": xpackModule('monitoring')] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 4aa697e09fc5a..f5dab8ff8d4f8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -605,6 +605,22 @@ public synchronized boolean isUpgradeAllowed() { return status.active; } + /** + * Determine if Index Lifecycle API should be enabled. + *

+ * Index Lifecycle API is available for all license types except

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index c73bb8576a7ad..7dae8856921e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -40,6 +40,7 @@ public final class ClientHelper { public static final String SECURITY_ORIGIN = "security"; public static final String WATCHER_ORIGIN = "watcher"; public static final String ML_ORIGIN = "ml"; + public static final String INDEX_LIFECYCLE_ORIGIN = "index_lifecycle"; public static final String MONITORING_ORIGIN = "monitoring"; public static final String DEPRECATION_ORIGIN = "deprecation"; public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index 3f27f66b27b77..db8981055d24d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.security.client.SecurityClient; @@ -36,6 +37,7 @@ public class XPackClient { private final SecurityClient securityClient; private final WatcherClient watcherClient; private final MachineLearningClient machineLearning; + private final ILMClient ilmClient; public XPackClient(Client client) { 
this.client = Objects.requireNonNull(client, "client"); @@ -45,6 +47,7 @@ public XPackClient(Client client) { this.securityClient = new SecurityClient(client); this.watcherClient = new WatcherClient(client); this.machineLearning = new MachineLearningClient(client); + this.ilmClient = new ILMClient(client); } public Client es() { @@ -75,6 +78,10 @@ public MachineLearningClient machineLearning() { return machineLearning; } + public ILMClient ilmClient() { + return ilmClient; + } + public XPackClient withHeaders(Map headers) { return new XPackClient(client.filterWithHeader(headers)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index aba2cea177f53..df1f985d46b97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -37,12 +37,30 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import 
org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; -import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -135,8 +153,8 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExceptExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; -import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.ssl.SSLService; import 
org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; @@ -317,7 +335,15 @@ public List getClientActions() { StopRollupJobAction.INSTANCE, DeleteRollupJobAction.INSTANCE, GetRollupJobsAction.INSTANCE, - GetRollupCapsAction.INSTANCE + GetRollupCapsAction.INSTANCE, + // ILM + DeleteLifecycleAction.INSTANCE, + GetLifecycleAction.INSTANCE, + PutLifecycleAction.INSTANCE, + ExplainLifecycleAction.INSTANCE, + RemoveIndexLifecyclePolicyAction.INSTANCE, + MoveToStepAction.INSTANCE, + RetryAction.INSTANCE ); } @@ -371,9 +397,27 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), // ccr + new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(MetaData.Custom.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, AutoFollowMetadata.TYPE, - in -> AutoFollowMetadata.readDiffFrom(MetaData.Custom.class, AutoFollowMetadata.TYPE, in)) + in -> AutoFollowMetadata.readDiffFrom(MetaData.Custom.class, AutoFollowMetadata.TYPE, in)), + // ILM + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, + IndexLifecycleFeatureSetUsage::new), + // ILM - Custom Metadata + new NamedWriteableRegistry.Entry(MetaData.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, IndexLifecycleMetadata.TYPE, + IndexLifecycleMetadata.IndexLifecycleMetadataDiff::new), + // ILM - LifecycleTypes + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + // ILM - Lifecycle Actions + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new 
NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new) ); } @@ -404,7 +448,7 @@ public List getNamedXContent() { RollupJobStatus::fromXContent), new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent) - ); + ); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 70eb047c8edef..0e6888dd80d73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -31,6 +31,8 @@ public final class XPackField { public static final String SQL = "sql"; /** Name constant for the rollup feature. */ public static final String ROLLUP = "rollup"; + /** Name constant for the index lifecycle feature. */ + public static final String INDEX_LIFECYCLE = "ilm"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index ac1dad2f1c7e4..9378b71b4b014 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ + package org.elasticsearch.xpack.core; import org.elasticsearch.common.network.NetworkModule; @@ -81,6 +82,12 @@ private XPackSettings() { public static final Setting BEATS_ENABLED = Setting.boolSetting("xpack.beats.enabled", true, Setting.Property.NodeScope); + /** + * Setting for enabling or disabling the index lifecycle extension. Defaults to true. + */ + public static final Setting INDEX_LIFECYCLE_ENABLED = Setting.boolSetting("xpack.ilm.enabled", true, + Setting.Property.NodeScope); + /** Setting for enabling or disabling TLS. Defaults to false. */ public static final Setting TRANSPORT_SSL_ENABLED = Setting.boolSetting("xpack.security.transport.ssl.enabled", false, Property.NodeScope); @@ -194,6 +201,7 @@ public static List> getAllSettings() { settings.add(USER_SETTING); settings.add(ROLLUP_ENABLED); settings.add(PASSWORD_HASHING_ALGORITHM); + settings.add(INDEX_LIFECYCLE_ENABLED); return Collections.unmodifiableList(settings); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java new file mode 100644 index 0000000000000..9cd74353237ac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class AllocateAction implements LifecycleAction { + + public static final String NAME = "allocate"; + public static final ParseField NUMBER_OF_REPLICAS_FIELD = new ParseField("number_of_replicas"); + public static final ParseField INCLUDE_FIELD = new ParseField("include"); + public static final ParseField EXCLUDE_FIELD = new ParseField("exclude"); + public static final ParseField REQUIRE_FIELD = new ParseField("require"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new AllocateAction((Integer) a[0], (Map) a[1], (Map) a[2], (Map) a[3])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_REPLICAS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), INCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), EXCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), REQUIRE_FIELD); + } + + private final Integer numberOfReplicas; + private final Map 
include; + private final Map exclude; + private final Map require; + + public static AllocateAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public AllocateAction(Integer numberOfReplicas, Map include, Map exclude, Map require) { + if (include == null) { + this.include = Collections.emptyMap(); + } else { + this.include = include; + } + if (exclude == null) { + this.exclude = Collections.emptyMap(); + } else { + this.exclude = exclude; + } + if (require == null) { + this.require = Collections.emptyMap(); + } else { + this.require = require; + } + if (this.include.isEmpty() && this.exclude.isEmpty() && this.require.isEmpty() && numberOfReplicas == null) { + throw new IllegalArgumentException( + "At least one of " + INCLUDE_FIELD.getPreferredName() + ", " + EXCLUDE_FIELD.getPreferredName() + " or " + + REQUIRE_FIELD.getPreferredName() + "must contain attributes for action " + NAME); + } + if (numberOfReplicas != null && numberOfReplicas < 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0"); + } + this.numberOfReplicas = numberOfReplicas; + } + + @SuppressWarnings("unchecked") + public AllocateAction(StreamInput in) throws IOException { + this(in.readOptionalVInt(), (Map) in.readGenericValue(), (Map) in.readGenericValue(), + (Map) in.readGenericValue()); + } + + public Integer getNumberOfReplicas() { + return numberOfReplicas; + } + + public Map getInclude() { + return include; + } + + public Map getExclude() { + return exclude; + } + + public Map getRequire() { + return require; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(numberOfReplicas); + out.writeGenericValue(include); + out.writeGenericValue(exclude); + out.writeGenericValue(require); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
IOException { + builder.startObject(); + if (numberOfReplicas != null) { + builder.field(NUMBER_OF_REPLICAS_FIELD.getPreferredName(), numberOfReplicas); + } + builder.field(INCLUDE_FIELD.getPreferredName(), include); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); + builder.field(REQUIRE_FIELD.getPreferredName(), require); + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List toSteps(Client client, String phase, StepKey nextStepKey) { + StepKey allocateKey = new StepKey(phase, NAME, NAME); + StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); + + Settings.Builder newSettings = Settings.builder(); + if (numberOfReplicas != null) { + newSettings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas); + } + include.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + key, value)); + exclude.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + key, value)); + require.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + key, value)); + UpdateSettingsStep allocateStep = new UpdateSettingsStep(allocateKey, allocationRoutedKey, client, newSettings.build()); + AllocationRoutedStep routedCheckStep = new AllocationRoutedStep(allocationRoutedKey, nextStepKey, true); + return Arrays.asList(allocateStep, routedCheckStep); + } + + @Override + public List toStepKeys(String phase) { + StepKey allocateKey = new StepKey(phase, NAME, NAME); + StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); + return Arrays.asList(allocateKey, allocationRoutedKey); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfReplicas, include, exclude, require); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() 
!= getClass()) { + return false; + } + AllocateAction other = (AllocateAction) obj; + return Objects.equals(numberOfReplicas, other.numberOfReplicas) && + Objects.equals(include, other.include) && + Objects.equals(exclude, other.exclude) && + Objects.equals(require, other.require); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java new file mode 100644 index 0000000000000..be7c7799bd010 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class AllocationRoutedStep extends ClusterStateWaitStep { + public static final String NAME = "check-allocation"; + + private static final Logger logger = LogManager.getLogger(AllocationRoutedStep.class); + + private static final AllocationDeciders ALLOCATION_DECIDERS = new AllocationDeciders(Settings.EMPTY, Collections.singletonList( + new FilterAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)))); + + private boolean waitOnAllShardCopies; + + AllocationRoutedStep(StepKey key, StepKey nextStepKey, boolean waitOnAllShardCopies) { + super(key, 
nextStepKey); + this.waitOnAllShardCopies = waitOnAllShardCopies; + } + + public boolean getWaitOnAllShardCopies() { + return waitOnAllShardCopies; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + IndexMetaData idxMeta = clusterState.metaData().index(index); + if (idxMeta == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + return new Result(false, null); + } + if (ActiveShardCount.ALL.enoughShardsActive(clusterState, index.getName()) == false) { + logger.debug("[{}] lifecycle action for index [{}] cannot make progress because not all shards are active", + getKey().getAction(), index.getName()); + return new Result(false, new Info(idxMeta.getNumberOfReplicas(), -1, false)); + } + // All the allocation attributes are already set so just need to check + // if the allocation has happened + RoutingAllocation allocation = new RoutingAllocation(ALLOCATION_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, + System.nanoTime()); + int allocationPendingAllShards = 0; + + ImmutableOpenIntMap allShards = clusterState.getRoutingTable().index(index).getShards(); + for (ObjectCursor shardRoutingTable : allShards.values()) { + int allocationPendingThisShard = 0; + int shardCopiesThisShard = shardRoutingTable.value.size(); + for (ShardRouting shardRouting : shardRoutingTable.value.shards()) { + String currentNodeId = shardRouting.currentNodeId(); + boolean canRemainOnCurrentNode = ALLOCATION_DECIDERS + .canRemain(shardRouting, clusterState.getRoutingNodes().node(currentNodeId), allocation) + .type() == Decision.Type.YES; + if (canRemainOnCurrentNode == false) { + allocationPendingThisShard++; + } + } + + if (waitOnAllShardCopies) { + allocationPendingAllShards += allocationPendingThisShard; + } else if (shardCopiesThisShard - allocationPendingThisShard == 0) { + 
allocationPendingAllShards++; + } + } + if (allocationPendingAllShards > 0) { + logger.debug( + "[{}] lifecycle action for index [{}] waiting for [{}] shards " + "to be allocated to nodes matching the given filters", + getKey().getAction(), index, allocationPendingAllShards); + return new Result(false, new Info(idxMeta.getNumberOfReplicas(), allocationPendingAllShards, true)); + } else { + logger.debug("[{}] lifecycle action for index [{}] complete", getKey().getAction(), index); + return new Result(true, null); + } + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), waitOnAllShardCopies); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + AllocationRoutedStep other = (AllocationRoutedStep) obj; + return super.equals(obj) && + Objects.equals(waitOnAllShardCopies, other.waitOnAllShardCopies); + } + + public static final class Info implements ToXContentObject { + + private final long actualReplicas; + private final long numberShardsLeftToAllocate; + private final boolean allShardsActive; + private final String message; + + static final ParseField ACTUAL_REPLICAS = new ParseField("actual_replicas"); + static final ParseField SHARDS_TO_ALLOCATE = new ParseField("shards_left_to_allocate"); + static final ParseField ALL_SHARDS_ACTIVE = new ParseField("all_shards_active"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("allocation_routed_step_info", + a -> new Info((long) a[0], (long) a[1], (boolean) a[2])); + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), ACTUAL_REPLICAS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_ALLOCATE); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ALL_SHARDS_ACTIVE); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(long 
actualReplicas, long numberShardsLeftToAllocate, boolean allShardsActive) { + this.actualReplicas = actualReplicas; + this.numberShardsLeftToAllocate = numberShardsLeftToAllocate; + this.allShardsActive = allShardsActive; + if (allShardsActive == false) { + message = "Waiting for all shard copies to be active"; + } else { + message = "Waiting for [" + numberShardsLeftToAllocate + "] shards " + + "to be allocated to nodes matching the given filters"; + } + } + + public long getActualReplicas() { + return actualReplicas; + } + + public long getNumberShardsLeftToAllocate() { + return numberShardsLeftToAllocate; + } + + public boolean allShardsActive() { + return allShardsActive; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MESSAGE.getPreferredName(), message); + builder.field(SHARDS_TO_ALLOCATE.getPreferredName(), numberShardsLeftToAllocate); + builder.field(ALL_SHARDS_ACTIVE.getPreferredName(), allShardsActive); + builder.field(ACTUAL_REPLICAS.getPreferredName(), actualReplicas); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(actualReplicas, numberShardsLeftToAllocate, allShardsActive); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Info other = (Info) obj; + return Objects.equals(actualReplicas, other.actualReplicas) && + Objects.equals(numberShardsLeftToAllocate, other.numberShardsLeftToAllocate) && + Objects.equals(allShardsActive, other.allShardsActive); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java new file mode 100644 index 
0000000000000..4e35ef60a09d6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +public abstract class AsyncActionStep extends Step { + + private Client client; + + public AsyncActionStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey); + this.client = client; + } + + protected Client getClient() { + return client; + } + + public boolean indexSurvives() { + return true; + } + + public abstract void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener); + + public interface Listener { + + void onResponse(boolean complete); + + void onFailure(Exception e); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java new file mode 100644 index 0000000000000..f6c968cfae41a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.xcontent.ToXContentObject; + +public abstract class AsyncWaitStep extends Step { + + private Client client; + + public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey); + this.client = client; + } + + protected Client getClient() { + return client; + } + + public abstract void evaluateCondition(IndexMetaData indexMetaData, Listener listener); + + public interface Listener { + + void onResponse(boolean conditionMet, ToXContentObject infomationContext); + + void onFailure(Exception e); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java new file mode 100644 index 0000000000000..ae64de497886a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.index.Index; + +public abstract class ClusterStateActionStep extends Step { + + public ClusterStateActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public abstract ClusterState performAction(Index index, ClusterState clusterState); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java new file mode 100644 index 0000000000000..0468f75490d9e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; + +public abstract class ClusterStateWaitStep extends Step { + + public ClusterStateWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public abstract Result isConditionMet(Index index, ClusterState clusterState); + + public static class Result { + private final boolean complete; + private final ToXContentObject infomationContext; + + public Result(boolean complete, ToXContentObject infomationContext) { + this.complete = complete; + this.infomationContext = infomationContext; + } + + public boolean isComplete() { + return complete; + } + + public ToXContentObject getInfomationContext() { + return infomationContext; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java new file mode 100644 index 0000000000000..b8192dd7e43be --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; + +import java.util.Objects; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +/** + * Copies the execution state data from one index to another, typically after a + * new index has been created. Useful for actions such as shrink. + */ +public class CopyExecutionStateStep extends ClusterStateActionStep { + public static final String NAME = "copy_execution_state"; + + private static final Logger logger = LogManager.getLogger(CopyExecutionStateStep.class); + + private String shrunkIndexPrefix; + + + public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) { + super(key, nextStepKey); + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + IndexMetaData indexMetaData = clusterState.metaData().index(index); + if (indexMetaData == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + return clusterState; + } + // get source index + String indexName = indexMetaData.getIndex().getName(); + // get target shrink index + String targetIndexName = shrunkIndexPrefix + indexName; + IndexMetaData targetIndexMetaData = clusterState.metaData().index(targetIndexName); + + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + String phase = lifecycleState.getPhase(); + String action = lifecycleState.getAction(); + 
long lifecycleDate = lifecycleState.getLifecycleDate(); + + LifecycleExecutionState.Builder relevantTargetCustomData = LifecycleExecutionState.builder(); + relevantTargetCustomData.setIndexCreationDate(lifecycleDate); + relevantTargetCustomData.setPhase(phase); + relevantTargetCustomData.setAction(action); + relevantTargetCustomData.setStep(ShrunkenIndexCheckStep.NAME); + + MetaData.Builder newMetaData = MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(targetIndexMetaData) + .putCustom(ILM_CUSTOM_METADATA_KEY, relevantTargetCustomData.build().asMap())); + + return ClusterState.builder(clusterState).metaData(newMetaData).build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + CopyExecutionStateStep that = (CopyExecutionStateStep) o; + return Objects.equals(shrunkIndexPrefix, that.shrunkIndexPrefix); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shrunkIndexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java new file mode 100644 index 0000000000000..1a0ad4c789ce4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A {@link LifecycleAction} which deletes the index. + */ +public class DeleteAction implements LifecycleAction { + public static final String NAME = "delete"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + + public static DeleteAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public DeleteAction() { + } + + public DeleteAction(StreamInput in) throws IOException { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME); + return Collections.singletonList(new DeleteStep(deleteStepKey, nextStepKey, client)); + } + + @Override + public List toStepKeys(String phase) { + return Collections.singletonList(new Step.StepKey(phase, NAME, DeleteStep.NAME)); + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + 
if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java new file mode 100644 index 0000000000000..b5ae441388419 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +public class DeleteStep extends AsyncActionStep { + public static final String NAME = "delete"; + + public DeleteStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + getClient().admin().indices() + .delete(new DeleteIndexRequest(indexMetaData.getIndex().getName()), + ActionListener.wrap(response -> listener.onResponse(true) , listener::onFailure)); + } + + @Override + public boolean indexSurvives() { + return false; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java new file mode 100644 index 0000000000000..50ad0155dff29 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +public class ErrorStep extends Step { + public static final String NAME = "ERROR"; + + public ErrorStep(StepKey key) { + super(key, key); + if (NAME.equals(key.getName()) == false) { + throw new IllegalArgumentException("An error step must have a step key whose step name is " + NAME); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java new file mode 100644 index 0000000000000..037de2d505292 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.info.ClusterInfoRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * The request object used by the Explain Lifecycle API. 
+ * + * Multiple indices may be queried in the same request using the + * {@link #indices(String...)} method + */ +public class ExplainLifecycleRequest extends ClusterInfoRequest { + + public ExplainLifecycleRequest() { + super(); + } + + public ExplainLifecycleRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; + return Objects.deepEquals(indices(), other.indices()) && + Objects.equals(indicesOptions(), other.indicesOptions()); + } + + @Override + public String toString() { + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java new file mode 100644 index 0000000000000..915ca17cb43a4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The response object returned by the Explain Lifecycle API. + * + * Since the API can be run over multiple indices the response provides a map of + * index to the explanation of the lifecycle status for that index. + */ +public class ExplainLifecycleResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField INDICES_FIELD = new ParseField("indices"); + + private Map indexResponses; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", a -> new ExplainLifecycleResponse(((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + INDICES_FIELD); + } + + public static ExplainLifecycleResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ExplainLifecycleResponse() { + } + + public ExplainLifecycleResponse(Map indexResponses) { + this.indexResponses = indexResponses; + } + + /** + * @return a map of the responses from each 
requested index. The maps key is + * the index name and the value is the + * {@link IndexLifecycleExplainResponse} describing the current + * lifecycle status of that index + */ + public Map getIndexResponses() { + return indexResponses; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(INDICES_FIELD.getPreferredName()); + for (IndexLifecycleExplainResponse indexResponse : indexResponses.values()) { + builder.field(indexResponse.getIndex(), indexResponse); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + int size = in.readVInt(); + Map indexResponses = new HashMap<>(size); + for (int i = 0; i < size; i++) { + IndexLifecycleExplainResponse indexResponse = new IndexLifecycleExplainResponse(in); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + this.indexResponses = indexResponses; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(indexResponses.size()); + for (IndexLifecycleExplainResponse e : indexResponses.values()) { + e.writeTo(out); + } + } + + @Override + public int hashCode() { + return Objects.hash(indexResponses); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleResponse other = (ExplainLifecycleResponse) obj; + return Objects.equals(indexResponses, other.indexResponses); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java new file mode 100644 index 0000000000000..2c4508a8355f0 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A {@link LifecycleAction} which force-merges the index. 
+ */ +public class ForceMergeAction implements LifecycleAction { + public static final String NAME = "forcemerge"; + public static final ParseField MAX_NUM_SEGMENTS_FIELD = new ParseField("max_num_segments"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + false, a -> { + int maxNumSegments = (int) a[0]; + return new ForceMergeAction(maxNumSegments); + }); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_NUM_SEGMENTS_FIELD); + } + + private final int maxNumSegments; + + public static ForceMergeAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ForceMergeAction(int maxNumSegments) { + if (maxNumSegments <= 0) { + throw new IllegalArgumentException("[" + MAX_NUM_SEGMENTS_FIELD.getPreferredName() + + "] must be a positive integer"); + } + this.maxNumSegments = maxNumSegments; + } + + public ForceMergeAction(StreamInput in) throws IOException { + this.maxNumSegments = in.readVInt(); + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(maxNumSegments); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MAX_NUM_SEGMENTS_FIELD.getPreferredName(), maxNumSegments); + builder.endObject(); + return builder; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); + + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); + StepKey countKey = new StepKey(phase, NAME, 
SegmentCountStep.NAME); + + UpdateSettingsStep readOnlyStep = new UpdateSettingsStep(readOnlyKey, forceMergeKey, client, readOnlySettings); + ForceMergeStep forceMergeStep = new ForceMergeStep(forceMergeKey, countKey, client, maxNumSegments); + SegmentCountStep segmentCountStep = new SegmentCountStep(countKey, nextStepKey, client, maxNumSegments); + return Arrays.asList(readOnlyStep, forceMergeStep, segmentCountStep); + } + + @Override + public List toStepKeys(String phase) { + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); + StepKey countKey = new StepKey(phase, NAME, SegmentCountStep.NAME); + return Arrays.asList(readOnlyKey, forceMergeKey, countKey); + } + + @Override + public int hashCode() { + return Objects.hash(maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ForceMergeAction other = (ForceMergeAction) obj; + return Objects.equals(maxNumSegments, other.maxNumSegments); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java new file mode 100644 index 0000000000000..776043babf0fc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +import java.util.Objects; + +public class ForceMergeStep extends AsyncActionStep { + public static final String NAME = "forcemerge"; + private final int maxNumSegments; + + public ForceMergeStep(StepKey key, StepKey nextStepKey, Client client, int maxNumSegments) { + super(key, nextStepKey, client); + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + ForceMergeRequest request = new ForceMergeRequest(indexMetaData.getIndex().getName()); + request.maxNumSegments(maxNumSegments); + getClient().admin().indices() + .forceMerge(request, ActionListener.wrap(response -> listener.onResponse(true), + listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ForceMergeStep other = (ForceMergeStep) obj; + return super.equals(obj) && + Objects.equals(maxNumSegments, other.maxNumSegments); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java new file mode 100644 index 0000000000000..e70268eb16a07 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java @@ -0,0 +1,314 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.joda.time.DateTime; +import org.joda.time.chrono.ISOChronology; + +import java.io.IOException; +import java.util.Objects; + +public class IndexLifecycleExplainResponse implements ToXContentObject, Writeable { + + private static final ParseField INDEX_FIELD = new ParseField("index"); + private static final ParseField MANAGED_BY_ILM_FIELD = new ParseField("managed"); + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField LIFECYCLE_DATE_FIELD = new ParseField("lifecycle_date"); + private static final ParseField PHASE_FIELD = new ParseField("phase"); + private static final ParseField ACTION_FIELD = new ParseField("action"); + private static final ParseField STEP_FIELD = new ParseField("step"); + private static final ParseField FAILED_STEP_FIELD = new ParseField("failed_step"); + private static final ParseField PHASE_TIME_FIELD = new ParseField("phase_time"); + private static final ParseField ACTION_TIME_FIELD = new ParseField("action_time"); + private static final 
ParseField STEP_TIME_FIELD = new ParseField("step_time"); + private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); + private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_lifecycle_explain_response", + a -> new IndexLifecycleExplainResponse( + (String) a[0], + (boolean) a[1], + (String) a[2], + (Long) (a[3]), + (String) a[4], + (String) a[5], + (String) a[6], + (String) a[7], + (Long) (a[8]), + (Long) (a[9]), + (Long) (a[10]), + (BytesReference) a[11], + (PhaseExecutionInfo) a[12])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), POLICY_NAME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LIFECYCLE_DATE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), STEP_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), PHASE_TIME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ACTION_TIME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), STEP_TIME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.copyCurrentStructure(p); + return BytesArray.bytes(builder); + }, STEP_INFO_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, 
""), + PHASE_EXECUTION_INFO); + } + + private final String index; + private final String policyName; + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final Long lifecycleDate; + private final Long phaseTime; + private final Long actionTime; + private final Long stepTime; + private final boolean managedByILM; + private final BytesReference stepInfo; + private final PhaseExecutionInfo phaseExecutionInfo; + + public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, Long lifecycleDate, + String phase, String action, String step, String failedStep, Long phaseTime, Long actionTime, Long stepTime, + BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, + actionTime, stepTime, stepInfo, phaseExecutionInfo); + } + + public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) { + return new IndexLifecycleExplainResponse(index, false, null, null, null, null, null, null, null, null, null, null, null); + } + + private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, Long lifecycleDate, + String phase, String action, String step, String failedStep, Long phaseTime, Long actionTime, + Long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + if (managedByILM) { + if (policyName == null) { + throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); + } + } else { + if (policyName != null || lifecycleDate != null || phase != null || action != null || step != null || failedStep != null + || phaseTime != null || actionTime != null || stepTime != null || stepInfo != null || phaseExecutionInfo != null) { + throw new IllegalArgumentException( + "Unmanaged index response must only 
contain fields: [" + MANAGED_BY_ILM_FIELD + ", " + INDEX_FIELD + "]"); + } + } + this.index = index; + this.policyName = policyName; + this.managedByILM = managedByILM; + this.lifecycleDate = lifecycleDate; + this.phase = phase; + this.action = action; + this.step = step; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseExecutionInfo = phaseExecutionInfo; + } + + public IndexLifecycleExplainResponse(StreamInput in) throws IOException { + index = in.readString(); + managedByILM = in.readBoolean(); + if (managedByILM) { + policyName = in.readString(); + lifecycleDate = in.readOptionalLong(); + phase = in.readOptionalString(); + action = in.readOptionalString(); + step = in.readOptionalString(); + failedStep = in.readOptionalString(); + phaseTime = in.readOptionalLong(); + actionTime = in.readOptionalLong(); + stepTime = in.readOptionalLong(); + stepInfo = in.readOptionalBytesReference(); + phaseExecutionInfo = in.readOptionalWriteable(PhaseExecutionInfo::new); + } else { + policyName = null; + lifecycleDate = null; + phase = null; + action = null; + step = null; + failedStep = null; + phaseTime = null; + actionTime = null; + stepTime = null; + stepInfo = null; + phaseExecutionInfo = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeBoolean(managedByILM); + if (managedByILM) { + out.writeString(policyName); + out.writeOptionalLong(lifecycleDate); + out.writeOptionalString(phase); + out.writeOptionalString(action); + out.writeOptionalString(step); + out.writeOptionalString(failedStep); + out.writeOptionalLong(phaseTime); + out.writeOptionalLong(actionTime); + out.writeOptionalLong(stepTime); + out.writeOptionalBytesReference(stepInfo); + out.writeOptionalWriteable(phaseExecutionInfo); + } + } + + public String getIndex() { + return index; + } + + public boolean managedByILM() { + 
return managedByILM; + } + + public String getPolicyName() { + return policyName; + } + + public Long getLifecycleDate() { + return lifecycleDate; + } + + public String getPhase() { + return phase; + } + + public Long getPhaseTime() { + return phaseTime; + } + + public String getAction() { + return action; + } + + public Long getActionTime() { + return actionTime; + } + + public String getStep() { + return step; + } + + public Long getStepTime() { + return stepTime; + } + + public String getFailedStep() { + return failedStep; + } + + public BytesReference getStepInfo() { + return stepInfo; + } + + public PhaseExecutionInfo getPhaseExecutionInfo() { + return phaseExecutionInfo; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_FIELD.getPreferredName(), index); + builder.field(MANAGED_BY_ILM_FIELD.getPreferredName(), managedByILM); + if (managedByILM) { + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + if (builder.humanReadable()) { + builder.field(LIFECYCLE_DATE_FIELD.getPreferredName(), new DateTime(lifecycleDate, ISOChronology.getInstanceUTC())); + } else { + builder.field(LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + } + builder.field(PHASE_FIELD.getPreferredName(), phase); + if (builder.humanReadable()) { + builder.field(PHASE_TIME_FIELD.getPreferredName(), new DateTime(phaseTime, ISOChronology.getInstanceUTC())); + } else { + builder.field(PHASE_TIME_FIELD.getPreferredName(), phaseTime); + } + builder.field(ACTION_FIELD.getPreferredName(), action); + if (builder.humanReadable()) { + builder.field(ACTION_TIME_FIELD.getPreferredName(), new DateTime(actionTime, ISOChronology.getInstanceUTC())); + } else { + builder.field(ACTION_TIME_FIELD.getPreferredName(), actionTime); + } + builder.field(STEP_FIELD.getPreferredName(), step); + if (builder.humanReadable()) { + builder.field(STEP_TIME_FIELD.getPreferredName(), new 
DateTime(stepTime, ISOChronology.getInstanceUTC())); + } else { + builder.field(STEP_TIME_FIELD.getPreferredName(), stepTime); + } + if (Strings.hasLength(failedStep)) { + builder.field(FAILED_STEP_FIELD.getPreferredName(), failedStep); + } + if (stepInfo != null && stepInfo.length() > 0) { + builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON); + } + if (phaseExecutionInfo != null) { + builder.field(PHASE_EXECUTION_INFO.getPreferredName(), phaseExecutionInfo); + } + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, actionTime, + stepTime, stepInfo, phaseExecutionInfo); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleExplainResponse other = (IndexLifecycleExplainResponse) obj; + return Objects.equals(index, other.index) && + Objects.equals(managedByILM, other.managedByILM) && + Objects.equals(policyName, other.policyName) && + Objects.equals(lifecycleDate, other.lifecycleDate) && + Objects.equals(phase, other.phase) && + Objects.equals(action, other.action) && + Objects.equals(step, other.step) && + Objects.equals(failedStep, other.failedStep) && + Objects.equals(phaseTime, other.phaseTime) && + Objects.equals(actionTime, other.actionTime) && + Objects.equals(stepTime, other.stepTime) && + Objects.equals(stepInfo, other.stepInfo) && + Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsage.java new file mode 100644 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.XPackField;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Usage information for the index lifecycle management (ILM) feature, as
 * reported through the xpack usage API. Carries one {@link PolicyStats}
 * entry per configured policy.
 */
public class IndexLifecycleFeatureSetUsage extends XPackFeatureSet.Usage {

    // null when the reporting node supplied no per-policy statistics
    private List<PolicyStats> policyStats;

    public IndexLifecycleFeatureSetUsage(StreamInput input) throws IOException {
        super(input);
        if (input.readBoolean()) {
            policyStats = input.readList(PolicyStats::new);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        // a boolean prefix marks whether the optional stats list follows
        boolean hasPolicyStats = policyStats != null;
        out.writeBoolean(hasPolicyStats);
        if (hasPolicyStats) {
            out.writeList(policyStats);
        }
    }

    public IndexLifecycleFeatureSetUsage(boolean available, boolean enabled) {
        this(available, enabled, null);
    }

    public IndexLifecycleFeatureSetUsage(boolean available, boolean enabled, List<PolicyStats> policyStats) {
        super(XPackField.INDEX_LIFECYCLE, available, enabled);
        this.policyStats = policyStats;
    }

    @Override
    protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
        if (policyStats != null) {
            builder.field("policy_count", policyStats.size());
            builder.field("policy_stats", policyStats);
        }
    }

    /** @return per-policy statistics, or null when none were reported */
    public List<PolicyStats> getPolicyStats() {
        return policyStats;
    }

    @Override
    public int hashCode() {
        return Objects.hash(available, enabled, policyStats);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        IndexLifecycleFeatureSetUsage other = (IndexLifecycleFeatureSetUsage) obj;
        return Objects.equals(available, other.available) &&
                Objects.equals(enabled, other.enabled) &&
                Objects.equals(policyStats, other.policyStats);
    }

    /**
     * Usage statistics for a single policy: stats per phase plus the number
     * of indices currently managed by the policy.
     */
    public static final class PolicyStats implements ToXContentObject, Writeable {

        public static final ParseField INDICES_MANAGED_FIELD = new ParseField("indices_managed");

        private final Map<String, PhaseStats> phaseStats;
        private final int indicesManaged;

        public PolicyStats(Map<String, PhaseStats> phaseStats, int numberIndicesManaged) {
            this.phaseStats = phaseStats;
            this.indicesManaged = numberIndicesManaged;
        }

        public PolicyStats(StreamInput in) throws IOException {
            this.phaseStats = in.readMap(StreamInput::readString, PhaseStats::new);
            this.indicesManaged = in.readVInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeMap(phaseStats, StreamOutput::writeString, (o, p) -> p.writeTo(o));
            out.writeVInt(indicesManaged);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(LifecyclePolicy.PHASES_FIELD.getPreferredName(), phaseStats);
            builder.field(INDICES_MANAGED_FIELD.getPreferredName(), indicesManaged);
            builder.endObject();
            return builder;
        }

        public Map<String, PhaseStats> getPhaseStats() {
            return phaseStats;
        }

        public int getIndicesManaged() {
            return indicesManaged;
        }

        @Override
        public int hashCode() {
            return Objects.hash(phaseStats, indicesManaged);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            PolicyStats other = (PolicyStats) obj;
            return Objects.equals(phaseStats, other.phaseStats) &&
                    Objects.equals(indicesManaged, other.indicesManaged);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }

    /**
     * Usage statistics for one phase of a policy: the configured action names
     * and the phase's minimum-age threshold.
     */
    public static final class PhaseStats implements ToXContentObject, Writeable {
        private final String[] actionNames;
        private final TimeValue minimumAge;

        public PhaseStats(TimeValue after, String[] actionNames) {
            this.actionNames = actionNames;
            this.minimumAge = after;
        }

        public PhaseStats(StreamInput in) throws IOException {
            actionNames = in.readStringArray();
            minimumAge = in.readTimeValue();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeStringArray(actionNames);
            out.writeTimeValue(minimumAge);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(Phase.MIN_AGE.getPreferredName(), minimumAge.getMillis());
            builder.field(Phase.ACTIONS_FIELD.getPreferredName(), actionNames);
            builder.endObject();
            return builder;
        }

        public String[] getActionNames() {
            return actionNames;
        }

        public TimeValue getAfter() {
            return minimumAge;
        }

        @Override
        public int hashCode() {
            // arrays do not implement value-based hashCode, so hash the contents explicitly
            return Objects.hash(Arrays.hashCode(actionNames), minimumAge);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            PhaseStats other = (PhaseStats) obj;
            return Objects.equals(minimumAge, other.minimumAge) &&
                    Objects.deepEquals(actionNames, other.actionNames);
        }
    }
}
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java new file mode 100644 index 0000000000000..0d68f070b6ecb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackPlugin.XPackMetaDataCustom; + +import java.io.IOException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; + + +public class IndexLifecycleMetadata implements XPackMetaDataCustom { + public static final String TYPE = "index_lifecycle"; + public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); + public static final ParseField POLICIES_FIELD = new 
ParseField("policies"); + public static final IndexLifecycleMetadata EMPTY = new IndexLifecycleMetadata(Collections.emptySortedMap(), OperationMode.RUNNING); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, + a -> new IndexLifecycleMetadata( + ((List) a[0]).stream() + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())), + OperationMode.valueOf((String) a[1]))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> LifecyclePolicyMetadata.parse(p, n), + v -> { + throw new IllegalArgumentException("ordered " + POLICIES_FIELD.getPreferredName() + " are not supported"); + }, POLICIES_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), OPERATION_MODE_FIELD); + } + + private final Map policyMetadatas; + private final OperationMode operationMode; + + public IndexLifecycleMetadata(Map policies, OperationMode operationMode) { + this.policyMetadatas = Collections.unmodifiableMap(policies); + this.operationMode = operationMode; + } + + public IndexLifecycleMetadata(StreamInput in) throws IOException { + int size = in.readVInt(); + TreeMap policies = new TreeMap<>(); + for (int i = 0; i < size; i++) { + policies.put(in.readString(), new LifecyclePolicyMetadata(in)); + } + this.policyMetadatas = policies; + this.operationMode = in.readEnum(OperationMode.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(policyMetadatas.size()); + for (Map.Entry entry : policyMetadatas.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + out.writeEnum(operationMode); + } + + public Map getPolicyMetadatas() { + return policyMetadatas; + } + + public OperationMode getOperationMode() { + return operationMode; + } + + public Map getPolicies() { + return policyMetadatas.values().stream().map(LifecyclePolicyMetadata::getPolicy) + 
.collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + } + + @Override + public Diff diff(Custom previousState) { + return new IndexLifecycleMetadataDiff((IndexLifecycleMetadata) previousState, this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(POLICIES_FIELD.getPreferredName(), policyMetadatas); + builder.field(OPERATION_MODE_FIELD.getPreferredName(), operationMode); + return builder; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_6_5_0; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public int hashCode() { + return Objects.hash(policyMetadatas, operationMode); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleMetadata other = (IndexLifecycleMetadata) obj; + return Objects.equals(policyMetadatas, other.policyMetadatas) + && Objects.equals(operationMode, other.operationMode); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static class IndexLifecycleMetadataDiff implements NamedDiff { + + final Diff> policies; + final OperationMode operationMode; + + IndexLifecycleMetadataDiff(IndexLifecycleMetadata before, IndexLifecycleMetadata after) { + this.policies = DiffableUtils.diff(before.policyMetadatas, after.policyMetadatas, DiffableUtils.getStringKeySerializer()); + this.operationMode = after.operationMode; + } + + public IndexLifecycleMetadataDiff(StreamInput in) throws IOException { + this.policies = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), LifecyclePolicyMetadata::new, + IndexLifecycleMetadataDiff::readLifecyclePolicyDiffFrom); + this.operationMode = in.readEnum(OperationMode.class); + } + + 
@Override + public MetaData.Custom apply(MetaData.Custom part) { + TreeMap newPolicies = new TreeMap<>( + policies.apply(((IndexLifecycleMetadata) part).policyMetadatas)); + return new IndexLifecycleMetadata(newPolicies, this.operationMode); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + policies.writeTo(out); + out.writeEnum(operationMode); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + static Diff readLifecyclePolicyDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(LifecyclePolicyMetadata::new, in); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java new file mode 100644 index 0000000000000..c9046cb5eb7ec --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +public final class InitializePolicyContextStep extends ClusterStateActionStep { + public static final String INITIALIZATION_PHASE = "new"; + public static final StepKey KEY = new StepKey(INITIALIZATION_PHASE, "init", "init"); + private static final Logger logger = LogManager.getLogger(InitializePolicyContextStep.class); + + public InitializePolicyContextStep(Step.StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + IndexMetaData indexMetaData = clusterState.getMetaData().index(index); + if (indexMetaData == null) { + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + // Index must have been since deleted, ignore it + return clusterState; + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState + .fromIndexMetadata(indexMetaData); + if (lifecycleState.getLifecycleDate() != null) { + return clusterState; + } + + ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); + + LifecycleExecutionState.Builder newCustomData = LifecycleExecutionState.builder(lifecycleState); + newCustomData.setIndexCreationDate(indexMetaData.getCreationDate()); + newClusterStateBuilder.metaData(MetaData.builder(clusterState.getMetaData()).put(IndexMetaData + .builder(indexMetaData) + .putCustom(ILM_CUSTOM_METADATA_KEY, newCustomData.build().asMap()))); + return newClusterStateBuilder.build(); + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java new file mode 100644 index 0000000000000..3e84813274d83 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.List; + +/** + * Executes an action on an index related to its lifecycle. + */ +public interface LifecycleAction extends ToXContentObject, NamedWriteable { + + /** + * converts the {@link LifecycleAction}'s execution plan into a series of + * {@link Step}s that reference each other to preserve order of operations. + * @param client the client that will be used by {@link AsyncActionStep} and {@link AsyncWaitStep} steps + * @param phase the name of the phase this action is being executed within + * @param nextStepKey the next step to execute after this action's steps. If null, then there are no further + * steps to run. It is the responsibility of each {@link LifecycleAction} to implement this + * correctly and not forget to link to this final step so that the policy can continue. 
+ * @return an ordered list of steps that represent the execution plan of the action + */ + List toSteps(Client client, String phase, @Nullable Step.StepKey nextStepKey); + + /** + * + * @param phase + * the name of the phase this action is being executed within + * @return the {@link StepKey}s for the steps which will be executed in this + * action + */ + List toStepKeys(String phase); + + /** + * @return true if this action is considered safe. An action is not safe if + * it will produce unwanted side effects or will get stuck when the + * action configuration is changed while an index is in this action + */ + boolean isSafeAction(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java new file mode 100644 index 0000000000000..b2d42bca7338e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Contains information about the execution of a lifecycle policy for a single + * index, and serializes/deserializes this information to and from custom + * index metadata. 
+ */ +public class LifecycleExecutionState { + public static final String ILM_CUSTOM_METADATA_KEY = "ilm"; + + private static final String PHASE = "phase"; + private static final String ACTION = "action"; + private static final String STEP = "step"; + private static final String INDEX_CREATION_DATE = "creation_date"; + private static final String PHASE_TIME = "phase_time"; + private static final String ACTION_TIME = "action_time"; + private static final String STEP_TIME = "step_time"; + private static final String FAILED_STEP = "failed_step"; + private static final String STEP_INFO = "step_info"; + private static final String PHASE_DEFINITION = "phase_definition"; + + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final String stepInfo; + private final String phaseDefinition; + private final Long lifecycleDate; + private final Long phaseTime; + private final Long actionTime; + private final Long stepTime; + + private LifecycleExecutionState(String phase, String action, String step, String failedStep, + String stepInfo, String phaseDefinition, Long lifecycleDate, + Long phaseTime, Long actionTime, Long stepTime) { + this.phase = phase; + this.action = action; + this.step = step; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseDefinition = phaseDefinition; + this.lifecycleDate = lifecycleDate; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + } + + /** + * Retrieves the execution state from an {@link IndexMetaData} based on the + * custom metadata. + * @param indexMetaData The metadata of the index to retrieve the execution + * state from. + * @return The execution state of that index. + */ + public static LifecycleExecutionState fromIndexMetadata(IndexMetaData indexMetaData) { + Map customData = indexMetaData.getCustomData(ILM_CUSTOM_METADATA_KEY); + customData = customData == null ? 
new HashMap<>() : customData; + return fromCustomMetadata(customData); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(LifecycleExecutionState state) { + return new Builder() + .setPhase(state.phase) + .setAction(state.action) + .setStep(state.step) + .setFailedStep(state.failedStep) + .setStepInfo(state.stepInfo) + .setPhaseDefinition(state.phaseDefinition) + .setIndexCreationDate(state.lifecycleDate) + .setPhaseTime(state.phaseTime) + .setActionTime(state.actionTime) + .setStepTime(state.stepTime); + } + + static LifecycleExecutionState fromCustomMetadata(Map customData) { + Builder builder = builder(); + if (customData.containsKey(PHASE)) { + builder.setPhase(customData.get(PHASE)); + } + if (customData.containsKey(ACTION)) { + builder.setAction(customData.get(ACTION)); + } + if (customData.containsKey(STEP)) { + builder.setStep(customData.get(STEP)); + } + if (customData.containsKey(FAILED_STEP)) { + builder.setFailedStep(customData.get(FAILED_STEP)); + } + if (customData.containsKey(STEP_INFO)) { + builder.setStepInfo(customData.get(STEP_INFO)); + } + if (customData.containsKey(PHASE_DEFINITION)) { + builder.setPhaseDefinition(customData.get(PHASE_DEFINITION)); + } + if (customData.containsKey(INDEX_CREATION_DATE)) { + try { + builder.setIndexCreationDate(Long.parseLong(customData.get(INDEX_CREATION_DATE))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, INDEX_CREATION_DATE, customData.get(INDEX_CREATION_DATE)); + } + } + if (customData.containsKey(PHASE_TIME)) { + try { + builder.setPhaseTime(Long.parseLong(customData.get(PHASE_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. 
Actual value: [{}]", + e, PHASE_TIME, customData.get(PHASE_TIME)); + } + } + if (customData.containsKey(ACTION_TIME)) { + try { + builder.setActionTime(Long.parseLong(customData.get(ACTION_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, ACTION_TIME, customData.get(ACTION_TIME)); + } + } + if (customData.containsKey(STEP_TIME)) { + try { + builder.setStepTime(Long.parseLong(customData.get(STEP_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, STEP_TIME, customData.get(STEP_TIME)); + } + } + return builder.build(); + } + + /** + * Converts this object to an immutable map representation for use with + * {@link IndexMetaData.Builder#putCustom(String, Map)}. + * @return An immutable Map representation of this execution state. + */ + public Map asMap() { + Map result = new HashMap<>(); + if (phase != null) { + result.put(PHASE, phase); + } + if (action != null) { + result.put(ACTION, action); + } + if (step != null) { + result.put(STEP, step); + } + if (failedStep != null) { + result.put(FAILED_STEP, failedStep); + } + if (stepInfo != null) { + result.put(STEP_INFO, stepInfo); + } + if (lifecycleDate != null) { + result.put(INDEX_CREATION_DATE, String.valueOf(lifecycleDate)); + } + if (phaseTime != null) { + result.put(PHASE_TIME, String.valueOf(phaseTime)); + } + if (actionTime != null) { + result.put(ACTION_TIME, String.valueOf(actionTime)); + } + if (stepTime != null) { + result.put(STEP_TIME, String.valueOf(stepTime)); + } + if (phaseDefinition != null) { + result.put(PHASE_DEFINITION, String.valueOf(phaseDefinition)); + } + return Collections.unmodifiableMap(result); + } + + public String getPhase() { + return phase; + } + + public String getAction() { + return action; + } + + public String getStep() { + return step; + } + + public 
String getFailedStep() { + return failedStep; + } + + public String getStepInfo() { + return stepInfo; + } + + public String getPhaseDefinition() { + return phaseDefinition; + } + + public Long getLifecycleDate() { + return lifecycleDate; + } + + public Long getPhaseTime() { + return phaseTime; + } + + public Long getActionTime() { + return actionTime; + } + + public Long getStepTime() { + return stepTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LifecycleExecutionState that = (LifecycleExecutionState) o; + return getLifecycleDate() == that.getLifecycleDate() && + getPhaseTime() == that.getPhaseTime() && + getActionTime() == that.getActionTime() && + getStepTime() == that.getStepTime() && + Objects.equals(getPhase(), that.getPhase()) && + Objects.equals(getAction(), that.getAction()) && + Objects.equals(getStep(), that.getStep()) && + Objects.equals(getFailedStep(), that.getFailedStep()) && + Objects.equals(getStepInfo(), that.getStepInfo()) && + Objects.equals(getPhaseDefinition(), that.getPhaseDefinition()); + } + + @Override + public int hashCode() { + return Objects.hash(getPhase(), getAction(), getStep(), getFailedStep(), getStepInfo(), getPhaseDefinition(), + getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime()); + } + + public static class Builder { + private String phase; + private String action; + private String step; + private String failedStep; + private String stepInfo; + private String phaseDefinition; + private Long indexCreationDate; + private Long phaseTime; + private Long actionTime; + private Long stepTime; + + public Builder setPhase(String phase) { + this.phase = phase; + return this; + } + + public Builder setAction(String action) { + this.action = action; + return this; + } + + public Builder setStep(String step) { + this.step = step; + return this; + } + + public Builder setFailedStep(String failedStep) { + this.failedStep = 
failedStep; + return this; + } + + public Builder setStepInfo(String stepInfo) { + this.stepInfo = stepInfo; + return this; + } + + public Builder setPhaseDefinition(String phaseDefinition) { + this.phaseDefinition = phaseDefinition; + return this; + } + + public Builder setIndexCreationDate(Long indexCreationDate) { + this.indexCreationDate = indexCreationDate; + return this; + } + + public Builder setPhaseTime(Long phaseTime) { + this.phaseTime = phaseTime; + return this; + } + + public Builder setActionTime(Long actionTime) { + this.actionTime = actionTime; + return this; + } + + public Builder setStepTime(Long stepTime) { + this.stepTime = stepTime; + return this; + } + + public LifecycleExecutionState build() { + return new LifecycleExecutionState(phase, action, step, failedStep, stepInfo, phaseDefinition, indexCreationDate, + phaseTime, actionTime, stepTime); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java new file mode 100644 index 0000000000000..a56818355c3e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java @@ -0,0 +1,266 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. A + * {@link LifecyclePolicy} is made up of a set of {@link Phase}s which it will + * move through. Policies are constrained by a {@link LifecycleType} which governs which + * {@link Phase}s and {@link LifecycleAction}s are allowed to be defined and in which order + * they are executed. 
+ */ +public class LifecyclePolicy extends AbstractDiffable + implements ToXContentObject, Diffable { + private static final Logger logger = LogManager.getLogger(LifecyclePolicy.class); + + public static final ParseField PHASES_FIELD = new ParseField("phases"); + + @SuppressWarnings("unchecked") + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("lifecycle_policy", false, + (a, name) -> { + List phases = (List) a[0]; + Map phaseMap = phases.stream().collect(Collectors.toMap(Phase::getName, Function.identity())); + return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, name, phaseMap); + }); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Phase.parse(p, n), v -> { + throw new IllegalArgumentException("ordered " + PHASES_FIELD.getPreferredName() + " are not supported"); + }, PHASES_FIELD); + } + + private final String name; + private final LifecycleType type; + private final Map phases; + + /** + * @param name + * the name of this {@link LifecyclePolicy} + * @param phases + * a {@link Map} of {@link Phase}s which make up this + * {@link LifecyclePolicy}. + */ + public LifecyclePolicy(String name, Map phases) { + this(TimeseriesLifecycleType.INSTANCE, name, phases); + } + + /** + * For Serialization + */ + public LifecyclePolicy(StreamInput in) throws IOException { + type = in.readNamedWriteable(LifecycleType.class); + name = in.readString(); + phases = Collections.unmodifiableMap(in.readMap(StreamInput::readString, Phase::new)); + } + + /** + * @param type + * the {@link LifecycleType} of the policy + * @param name + * the name of this {@link LifecyclePolicy} + * @param phases + * a {@link Map} of {@link Phase}s which make up this + * {@link LifecyclePolicy}. 
+ */ + public LifecyclePolicy(LifecycleType type, String name, Map phases) { + this.name = name; + this.phases = phases; + this.type = type; + this.type.validate(phases.values()); + } + + public static LifecyclePolicy parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(type); + out.writeString(name); + out.writeMap(phases, StreamOutput::writeString, (o, val) -> val.writeTo(o)); + } + + /** + * @return the name of this {@link LifecyclePolicy} + */ + public String getName() { + return name; + } + + /** + * @return the type of this {@link LifecyclePolicy} + */ + public LifecycleType getType() { + return type; + } + + /** + * @return the {@link Phase}s for this {@link LifecyclePolicy} in the order + * in which they will be executed. + */ + public Map getPhases() { + return phases; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(PHASES_FIELD.getPreferredName()); + for (Phase phase : phases.values()) { + builder.field(phase.getName(), phase); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + /** + * This method is used to compile this policy into its execution plan built out + * of {@link Step} instances. The order of the {@link Phase}s and {@link LifecycleAction}s is + * determined by the {@link LifecycleType} associated with this policy. + * + * The order of the policy will have this structure: + * + * - initialize policy context step + * - phase-1 phase-after-step + * - ... phase-1 action steps + * - phase-2 phase-after-step + * - ... + * - terminal policy step + * + * We first initialize the policy's context and ensure that the index has proper settings set. + * Then we begin each phase's after-step along with all its actions as steps. 
Finally, we have + * a terminal step to inform us that this policy's steps are all complete. Each phase's `after` + * step is associated with the previous phase's phase. For example, the warm phase's `after` is + * associated with the hot phase so that it is clear that we haven't stepped into the warm phase + * just yet (until this step is complete). + * + * @param client The Elasticsearch Client to use during execution of {@link AsyncActionStep} + * and {@link AsyncWaitStep} steps. + * @return The list of {@link Step} objects in order of their execution. + */ + public List toSteps(Client client) { + List steps = new ArrayList<>(); + List orderedPhases = type.getOrderedPhases(phases); + ListIterator phaseIterator = orderedPhases.listIterator(orderedPhases.size()); + + // final step so that policy can properly update cluster-state with last action completed + steps.add(TerminalPolicyStep.INSTANCE); + Step.StepKey lastStepKey = TerminalPolicyStep.KEY; + + Phase phase = null; + // add steps for each phase, in reverse + while (phaseIterator.hasPrevious()) { + + Phase previousPhase = phaseIterator.previous(); + + // add `after` step for phase before next + if (phase != null) { + // after step should have the name of the previous phase since the index is still in the + // previous phase until the after condition is reached + Step.StepKey afterStepKey = new Step.StepKey(previousPhase.getName(), PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); + Step phaseAfterStep = new PhaseCompleteStep(afterStepKey, lastStepKey); + steps.add(phaseAfterStep); + lastStepKey = phaseAfterStep.getKey(); + } + + phase = previousPhase; + List orderedActions = type.getOrderedActions(phase); + ListIterator actionIterator = orderedActions.listIterator(orderedActions.size()); + // add steps for each action, in reverse + while (actionIterator.hasPrevious()) { + LifecycleAction action = actionIterator.previous(); + List actionSteps = action.toSteps(client, phase.getName(), lastStepKey); + 
ListIterator actionStepsIterator = actionSteps.listIterator(actionSteps.size()); + while (actionStepsIterator.hasPrevious()) { + Step step = actionStepsIterator.previous(); + steps.add(step); + lastStepKey = step.getKey(); + } + } + } + + if (phase != null) { + // The very first after step is in a phase before the hot phase so call this "new" + Step.StepKey afterStepKey = new Step.StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); + Step phaseAfterStep = new PhaseCompleteStep(afterStepKey, lastStepKey); + steps.add(phaseAfterStep); + lastStepKey = phaseAfterStep.getKey(); + } + + // init step so that policy is guaranteed to have + steps.add(new InitializePolicyContextStep(InitializePolicyContextStep.KEY, lastStepKey)); + + Collections.reverse(steps); + + return steps; + } + + public boolean isActionSafe(StepKey stepKey) { + if ("new".equals(stepKey.getPhase())) { + return true; + } + Phase phase = phases.get(stepKey.getPhase()); + if (phase != null) { + LifecycleAction action = phase.getActions().get(stepKey.getAction()); + if (action != null) { + return action.isSafeAction(); + } else { + throw new IllegalArgumentException("Action [" + stepKey.getAction() + "] in phase [" + stepKey.getPhase() + + "] does not exist in policy [" + name + "]"); + } + } else { + throw new IllegalArgumentException("Phase [" + stepKey.getPhase() + "] does not exist in policy [" + name + "]"); + } + } + + @Override + public int hashCode() { + return Objects.hash(name, phases); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + LifecyclePolicy other = (LifecyclePolicy) obj; + return Objects.equals(name, other.name) && + Objects.equals(phases, other.phases); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadata.java 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.Map;
import java.util.Objects;

/**
 * Wraps a {@link LifecyclePolicy} together with cluster-state bookkeeping: the security
 * headers captured when the policy was stored, its version, and its modification time.
 */
public class LifecyclePolicyMetadata extends AbstractDiffable<LifecyclePolicyMetadata>
        implements ToXContentObject, Diffable<LifecyclePolicyMetadata> {

    static final ParseField POLICY = new ParseField("policy");
    static final ParseField HEADERS = new ParseField("headers");
    static final ParseField VERSION = new ParseField("version");
    static final ParseField MODIFIED_DATE = new ParseField("modified_date");
    static final ParseField MODIFIED_DATE_STRING = new ParseField("modified_date_string");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<LifecyclePolicyMetadata, String> PARSER =
        new ConstructingObjectParser<>("policy_metadata",
            a -> {
                LifecyclePolicy policy = (LifecyclePolicy) a[0];
                // a[4] (modified_date_string) is parsed but intentionally ignored: it is
                // derived from modified_date and recomputed on demand
                return new LifecyclePolicyMetadata(policy, (Map<String, String>) a[1], (long) a[2], (long) a[3]);
            });
    static {
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY);
        PARSER.declareField(ConstructingObjectParser.constructorArg(), XContentParser::mapStrings, HEADERS, ValueType.OBJECT);
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION);
        PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_STRING);
    }

    public static LifecyclePolicyMetadata parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    private final LifecyclePolicy policy;
    private final Map<String, String> headers;
    private final long version;
    private final long modifiedDate;

    /**
     * @param policy       the wrapped policy
     * @param headers      the (security) request headers captured when the policy was stored
     * @param version      the version of the stored policy
     * @param modifiedDate the modification time, in epoch milliseconds
     */
    public LifecyclePolicyMetadata(LifecyclePolicy policy, Map<String, String> headers, long version, long modifiedDate) {
        this.policy = policy;
        this.headers = headers;
        this.version = version;
        this.modifiedDate = modifiedDate;
    }

    /**
     * Deserialization constructor; mirrors {@link #writeTo(StreamOutput)}.
     */
    @SuppressWarnings("unchecked")
    public LifecyclePolicyMetadata(StreamInput in) throws IOException {
        this.policy = new LifecyclePolicy(in);
        this.headers = (Map<String, String>) in.readGenericValue();
        this.version = in.readVLong();
        this.modifiedDate = in.readVLong();
    }

    public Map<String, String> getHeaders() {
        return headers;
    }

    public LifecyclePolicy getPolicy() {
        return policy;
    }

    public String getName() {
        return policy.getName();
    }

    public long getVersion() {
        return version;
    }

    public long getModifiedDate() {
        return modifiedDate;
    }

    /**
     * @return the modification time rendered as an ISO-8601 UTC date-time string
     */
    public String getModifiedDateString() {
        ZonedDateTime modifiedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC);
        return modifiedDateTime.toString();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(POLICY.getPreferredName(), policy);
        builder.field(HEADERS.getPreferredName(), headers);
        builder.field(VERSION.getPreferredName(), version);
        builder.field(MODIFIED_DATE.getPreferredName(), modifiedDate);
        builder.field(MODIFIED_DATE_STRING.getPreferredName(), getModifiedDateString());
        builder.endObject();
        return builder;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        policy.writeTo(out);
        out.writeGenericValue(headers);
        out.writeVLong(version);
        out.writeVLong(modifiedDate);
    }

    @Override
    public int hashCode() {
        return Objects.hash(policy, headers, version, modifiedDate);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        LifecyclePolicyMetadata other = (LifecyclePolicyMetadata) obj;
        // compare the primitive longs directly to avoid needless boxing
        return Objects.equals(policy, other.policy) &&
            Objects.equals(headers, other.headers) &&
            version == other.version &&
            modifiedDate == other.modifiedDate;
    }

}
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; + +/** + * Class encapsulating settings related to Index Lifecycle Management X-Pack Plugin + */ +public class LifecycleSettings { + public static final String LIFECYCLE_POLL_INTERVAL = "indices.lifecycle.poll_interval"; + public static final String LIFECYCLE_NAME = "index.lifecycle.name"; + + public static final Setting LIFECYCLE_POLL_INTERVAL_SETTING = Setting.positiveTimeSetting(LIFECYCLE_POLL_INTERVAL, + TimeValue.timeValueMinutes(10), Setting.Property.Dynamic, Setting.Property.NodeScope); + public static final Setting LIFECYCLE_NAME_SETTING = Setting.simpleString(LIFECYCLE_NAME, + Setting.Property.Dynamic, Setting.Property.IndexScope); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java new file mode 100644 index 0000000000000..69be30fdfbd0e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.NamedWriteable; + +import java.security.Policy; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +public interface LifecycleType extends NamedWriteable { + + /** + * @return the first phase of this policy to execute + */ + List getOrderedPhases(Map phases); + + /** + * Returns the next phase thats available after + * currentPhaseName. Note that currentPhaseName + * does not need to exist in phases. 
+ * + * If the current {@link Phase} is the last phase in the {@link Policy} this + * method will return null. + * + * If the phase is not valid for the lifecycle type an + * {@link IllegalArgumentException} will be thrown. + */ + String getNextPhaseName(String currentPhaseName, Map phases); + + /** + * Returns the previous phase thats available before + * currentPhaseName. Note that currentPhaseName + * does not need to exist in phases. + * + * If the current {@link Phase} is the first phase in the {@link Policy} + * this method will return null. + * + * If the phase is not valid for the lifecycle type an + * {@link IllegalArgumentException} will be thrown. + */ + String getPreviousPhaseName(String currentPhaseName, Map phases); + + List getOrderedActions(Phase phase); + + /** + * Returns the name of the next phase that is available in the phases after + * currentActionName. Note that currentActionName + * does not need to exist in the {@link Phase}. + * + * If the current action is the last action in the phase this method will + * return null. + * + * If the action is not valid for the phase an + * {@link IllegalArgumentException} will be thrown. + */ + String getNextActionName(String currentActionName, Phase phase); + + + /** + * validates whether the specified phases are valid for this + * policy instance. + * + * @param phases + * the phases to verify validity against + * @throws IllegalArgumentException + * if a specific phase or lack of a specific phase is invalid. + */ + void validate(Collection phases); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java new file mode 100644 index 0000000000000..defc2e46818bc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; + +/** + * Enum representing the different modes that Index Lifecycle Service can operate in. + */ +public enum OperationMode { + /** + * This represents a state where no policies are executed + */ + STOPPED { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING; + } + }, + + /** + * this represents a state where only sensitive actions (like {@link ShrinkAction}) will be executed + * until they finish, at which point the operation mode will move to STOPPED. + */ + STOPPING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING || nextMode == STOPPED; + } + }, + + /** + * Normal operation where all policies are executed as normal. + */ + RUNNING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == STOPPING; + } + }; + + public abstract boolean isValidChange(OperationMode nextMode); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java new file mode 100644 index 0000000000000..08b995ade1460 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Represents set of {@link LifecycleAction}s which should be executed at a
 * particular point in the lifecycle of an index.
 */
public class Phase implements ToXContentObject, Writeable {

    public static final ParseField MIN_AGE = new ParseField("min_age");
    public static final ParseField ACTIONS_FIELD = new ParseField("actions");

    @SuppressWarnings("unchecked")
    private static final ConstructingObjectParser<Phase, String> PARSER = new ConstructingObjectParser<>("phase", false,
        (a, name) -> new Phase(name, (TimeValue) a[0], ((List<LifecycleAction>) a[1]).stream()
            .collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()))));
    static {
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> TimeValue.parseTimeValue(p.text(), MIN_AGE.getPreferredName()), MIN_AGE, ValueType.VALUE);
        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(),
            (p, c, n) -> p.namedObject(LifecycleAction.class, n, null), v -> {
                // action order comes from the LifecycleType, not from document order
                throw new IllegalArgumentException("ordered " + ACTIONS_FIELD.getPreferredName() + " are not supported");
            }, ACTIONS_FIELD);
    }

    public static Phase parse(XContentParser parser, String name) {
        return PARSER.apply(parser, name);
    }

    private final String name;
    private final Map<String, LifecycleAction> actions;
    private final TimeValue minimumAge;

    /**
     * @param name
     *            the name of this {@link Phase}.
     * @param minimumAge
     *            the age of the index when the index should move to this
     *            {@link Phase}; {@code null} is treated as {@link TimeValue#ZERO}.
     * @param actions
     *            a {@link Map} of the {@link LifecycleAction}s to run when
     *            during this {@link Phase}. The keys in this map are the associated
     *            action names. The order of these actions is defined
     *            by the {@link LifecycleType}
     */
    public Phase(String name, TimeValue minimumAge, Map<String, LifecycleAction> actions) {
        this.name = name;
        if (minimumAge == null) {
            this.minimumAge = TimeValue.ZERO;
        } else {
            this.minimumAge = minimumAge;
        }
        this.actions = actions;
    }

    /**
     * For Serialization; mirrors {@link #writeTo(StreamOutput)}.
     */
    public Phase(StreamInput in) throws IOException {
        this.name = in.readString();
        this.minimumAge = in.readTimeValue();
        int size = in.readVInt();
        TreeMap<String, LifecycleAction> actions = new TreeMap<>();
        for (int i = 0; i < size; i++) {
            actions.put(in.readString(), in.readNamedWriteable(LifecycleAction.class));
        }
        this.actions = actions;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
        out.writeTimeValue(minimumAge);
        out.writeVInt(actions.size());
        for (Map.Entry<String, LifecycleAction> entry : actions.entrySet()) {
            out.writeString(entry.getKey());
            out.writeNamedWriteable(entry.getValue());
        }
    }

    /**
     * @return the age of the index when the index should move to this
     *         {@link Phase}.
     */
    public TimeValue getMinimumAge() {
        return minimumAge;
    }

    /**
     * @return the name of this {@link Phase}
     */
    public String getName() {
        return name;
    }

    /**
     * @return a {@link Map} of the {@link LifecycleAction}s to run during
     *         this {@link Phase}, keyed by action name.
     */
    public Map<String, LifecycleAction> getActions() {
        return actions;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(MIN_AGE.getPreferredName(), minimumAge.getStringRep());
        builder.field(ACTIONS_FIELD.getPreferredName(), actions);
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, minimumAge, actions);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        Phase other = (Phase) obj;
        return Objects.equals(name, other.name) &&
            Objects.equals(minimumAge, other.minimumAge) &&
            Objects.equals(actions, other.actions);
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }

}
+ */ +public class PhaseCompleteStep extends Step { + public static final String NAME = "complete"; + + public PhaseCompleteStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java new file mode 100644 index 0000000000000..1ba7390ed2202 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class contains information about the current phase being executed by Index + * Lifecycle Management on the specific index. 
+ */ +public class PhaseExecutionInfo implements ToXContentObject, Writeable { + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField PHASE_DEFINITION_FIELD = new ParseField("phase_definition"); + private static final ParseField VERSION_FIELD = new ParseField("version"); + private static final ParseField MODIFIED_DATE_IN_MILLIS_FIELD = new ParseField("modified_date_in_millis"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "phase_execution_info", false, + (a, name) -> new PhaseExecutionInfo((String) a[0], (Phase) a[1], (long) a[2], (long) a[3])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Phase::parse, PHASE_DEFINITION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_IN_MILLIS_FIELD); + } + + public static PhaseExecutionInfo parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final String policyName; + private final Phase phase; + private final long version; + private final long modifiedDate; + + /** + * This class holds information about the current phase that is being executed + * + * @param policyName the name of the policy being executed, this may not be the current policy assigned to an index + * @param phase the current phase definition executed + * @param version the version of the policyName being executed + * @param modifiedDate the time the executing version of the phase was modified + */ + public PhaseExecutionInfo(String policyName, @Nullable Phase phase, long version, long modifiedDate) { + this.policyName = policyName; + this.phase = phase; + this.version = version; + this.modifiedDate = modifiedDate; + } + + PhaseExecutionInfo(StreamInput in) throws IOException 
{ + this.policyName = in.readString(); + this.phase = in.readOptionalWriteable(Phase::new); + this.version = in.readVLong(); + this.modifiedDate = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(policyName); + out.writeOptionalWriteable(phase); + out.writeVLong(version); + out.writeVLong(modifiedDate); + } + + public String getPolicyName() { + return policyName; + } + + public Phase getPhase() { + return phase; + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + @Override + public int hashCode() { + return Objects.hash(policyName, phase, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PhaseExecutionInfo other = (PhaseExecutionInfo) obj; + return Objects.equals(policyName, other.policyName) && + Objects.equals(phase, other.phase) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + + @Override + public String toString() { + return Strings.toString(this, false, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + if (phase != null) { + builder.field(PHASE_DEFINITION_FIELD.getPreferredName(), phase); + } + builder.field(VERSION_FIELD.getPreferredName(), version); + builder.timeField(MODIFIED_DATE_IN_MILLIS_FIELD.getPreferredName(), "modified_date", modifiedDate); + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java new file mode 100644 index 0000000000000..15edd51908bfe --- /dev/null +++ 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

/**
 * A {@link LifecycleAction} which makes the index read-only by setting the
 * index write block ({@code index.blocks.write}).
 */
public class ReadOnlyAction implements LifecycleAction {
    public static final String NAME = "readonly";
    public static final ReadOnlyAction INSTANCE = new ReadOnlyAction();

    private static final ObjectParser<ReadOnlyAction, Void> PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new);

    public static ReadOnlyAction parse(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    public ReadOnlyAction() {
    }

    // the action carries no state, so there is nothing to read or write on the wire
    public ReadOnlyAction(StreamInput in) {
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.endObject();
        return builder;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
    }

    @Override
    public boolean isSafeAction() {
        return true;
    }

    @Override
    public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
        Step.StepKey key = new Step.StepKey(phase, NAME, NAME);
        Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build();
        return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, readOnlySettings));
    }

    @Override
    public List<StepKey> toStepKeys(String phase) {
        return Collections.singletonList(new Step.StepKey(phase, NAME, NAME));
    }

    @Override
    public int hashCode() {
        // stateless singleton-style action: all instances are equal
        return ReadOnlyAction.class.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        return true;
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * A {@link LifecycleAction} which rolls over the index onto a new write index
 * once at least one of the configured conditions (max size, max age, max docs)
 * is met.
 */
public class RolloverAction implements LifecycleAction {
    public static final String NAME = "rollover";
    public static final ParseField MAX_SIZE_FIELD = new ParseField("max_size");
    public static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs");
    public static final ParseField MAX_AGE_FIELD = new ParseField("max_age");
    public static final String LIFECYCLE_ROLLOVER_ALIAS = "index.lifecycle.rollover_alias";
    public static final Setting<String> LIFECYCLE_ROLLOVER_ALIAS_SETTING = Setting.simpleString(LIFECYCLE_ROLLOVER_ALIAS,
        Setting.Property.Dynamic, Setting.Property.IndexScope);

    private static final ConstructingObjectParser<RolloverAction, Void> PARSER = new ConstructingObjectParser<>(NAME,
        a -> new RolloverAction((ByteSizeValue) a[0], (TimeValue) a[1], (Long) a[2]));
    static {
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SIZE_FIELD.getPreferredName()), MAX_SIZE_FIELD, ValueType.VALUE);
        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_AGE_FIELD.getPreferredName()), MAX_AGE_FIELD, ValueType.VALUE);
        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD);
    }

    private final ByteSizeValue maxSize;
    private final Long maxDocs;
    private final TimeValue maxAge;

    public static RolloverAction parse(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    /**
     * @param maxSize roll over once the index reaches this size, or {@code null} to ignore size
     * @param maxAge  roll over once the index reaches this age, or {@code null} to ignore age
     * @param maxDocs roll over once the index holds this many docs, or {@code null} to ignore doc count
     * @throws IllegalArgumentException if all three conditions are {@code null}
     */
    public RolloverAction(ByteSizeValue maxSize, TimeValue maxAge, Long maxDocs) {
        if (maxSize == null && maxAge == null && maxDocs == null) {
            throw new IllegalArgumentException("At least one rollover condition must be set.");
        }
        this.maxSize = maxSize;
        this.maxAge = maxAge;
        this.maxDocs = maxDocs;
    }

    /**
     * Deserialization constructor; mirrors {@link #writeTo(StreamOutput)}.
     */
    public RolloverAction(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            maxSize = new ByteSizeValue(in);
        } else {
            maxSize = null;
        }
        maxAge = in.readOptionalTimeValue();
        if (in.readBoolean()) {
            maxDocs = in.readVLong();
        } else {
            maxDocs = null;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        boolean hasMaxSize = maxSize != null;
        out.writeBoolean(hasMaxSize);
        if (hasMaxSize) {
            maxSize.writeTo(out);
        }
        out.writeOptionalTimeValue(maxAge);
        boolean hasMaxDocs = maxDocs != null;
        out.writeBoolean(hasMaxDocs);
        if (hasMaxDocs) {
            out.writeVLong(maxDocs);
        }
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    public ByteSizeValue getMaxSize() {
        return maxSize;
    }

    public TimeValue getMaxAge() {
        return maxAge;
    }

    public Long getMaxDocs() {
        return maxDocs;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        if (maxSize != null) {
            builder.field(MAX_SIZE_FIELD.getPreferredName(), maxSize.getStringRep());
        }
        if (maxAge != null) {
            builder.field(MAX_AGE_FIELD.getPreferredName(), maxAge.getStringRep());
        }
        if (maxDocs != null) {
            builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs);
        }
        builder.endObject();
        return builder;
    }

    @Override
    public boolean isSafeAction() {
        return true;
    }

    @Override
    public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
        // rollover happens first, then the index's lifecycle date is reset to the rollover time
        StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME);
        RolloverStep rolloverStep = new RolloverStep(new StepKey(phase, NAME, RolloverStep.NAME), updateDateStepKey, client,
            maxSize, maxAge, maxDocs);
        UpdateRolloverLifecycleDateStep updateDateStep = new UpdateRolloverLifecycleDateStep(updateDateStepKey, nextStepKey);
        return Arrays.asList(rolloverStep, updateDateStep);
    }

    @Override
    public List<StepKey> toStepKeys(String phase) {
        StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME);
        StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME);
        return Arrays.asList(rolloverStepKey, updateDateStepKey);
    }

    @Override
    public int hashCode() {
        return Objects.hash(maxSize, maxAge, maxDocs);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        RolloverAction other = (RolloverAction) obj;
        return Objects.equals(maxSize, other.maxSize) &&
            Objects.equals(maxAge, other.maxAge) &&
            Objects.equals(maxDocs, other.maxDocs);
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

/**
 * An {@link AsyncWaitStep} that attempts a rollover of the alias configured via
 * {@link RolloverAction#LIFECYCLE_ROLLOVER_ALIAS_SETTING} on the index, completing
 * once the rollover actually happened.
 */
public class RolloverStep extends AsyncWaitStep {
    public static final String NAME = "attempt_rollover";

    // Rollover conditions; each may be null, meaning "not configured".
    private final ByteSizeValue maxSize;
    private final TimeValue maxAge;
    private final Long maxDocs;

    public RolloverStep(StepKey key, StepKey nextStepKey, Client client, ByteSizeValue maxSize, TimeValue maxAge,
                        Long maxDocs) {
        super(key, nextStepKey, client);
        this.maxSize = maxSize;
        this.maxAge = maxAge;
        this.maxDocs = maxDocs;
    }

    @Override
    public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) {
        String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings());

        // Fail fast with actionable messages when the index is not set up for rollover.
        if (Strings.isNullOrEmpty(rolloverAlias)) {
            listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT,
                "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS,
                indexMetaData.getIndex().getName())));
            return;
        }

        if (indexMetaData.getAliases().containsKey(rolloverAlias) == false) {
            listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT,
                "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias,
                indexMetaData.getIndex().getName())));
            return;
        }

        // Let the rollover API generate the new index name (second argument null).
        RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null);
        if (maxAge != null) {
            rolloverRequest.addMaxIndexAgeCondition(maxAge);
        }
        if (maxSize != null) {
            rolloverRequest.addMaxIndexSizeCondition(maxSize);
        }
        if (maxDocs != null) {
            rolloverRequest.addMaxIndexDocsCondition(maxDocs);
        }
        getClient().admin().indices().rolloverIndex(rolloverRequest,
            ActionListener.wrap(response -> listener.onResponse(response.isRolledOver(), new EmptyInfo()), listener::onFailure));
    }

    ByteSizeValue getMaxSize() {
        return maxSize;
    }

    TimeValue getMaxAge() {
        return maxAge;
    }

    Long getMaxDocs() {
        return maxDocs;
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), maxSize, maxAge, maxDocs);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        RolloverStep other = (RolloverStep) obj;
        return super.equals(obj) &&
                Objects.equals(maxSize, other.maxSize) &&
                Objects.equals(maxAge, other.maxAge) &&
                Objects.equals(maxDocs, other.maxDocs);
    }

    // We currently have no information to provide for this AsyncWaitStep, so this is an empty object.
    // static: no enclosing-instance state is needed, so avoid the hidden outer reference.
    private static class EmptyInfo implements ToXContentObject {
        private EmptyInfo() {}

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder;
        }
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import java.util.stream.StreamSupport;

/**
 * This {@link Step} evaluates whether force_merge was successful by checking that no
 * shard of the index has more than {@code maxNumSegments} segments.
 */
public class SegmentCountStep extends AsyncWaitStep {
    public static final String NAME = "segment-count";

    // Maximum segments allowed per shard before the condition counts as met.
    private final int maxNumSegments;

    public SegmentCountStep(StepKey key, StepKey nextStepKey, Client client, int maxNumSegments) {
        super(key, nextStepKey, client);
        this.maxNumSegments = maxNumSegments;
    }

    public int getMaxNumSegments() {
        return maxNumSegments;
    }

    @Override
    public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) {
        getClient().admin().indices().segments(new IndicesSegmentsRequest(indexMetaData.getIndex().getName()),
            ActionListener.wrap(response -> {
                // NOTE(review): assumes the index is always present in the segments
                // response; if it were absent this lookup would NPE — confirm upstream guarantees.
                long numberShardsLeftToMerge =
                    StreamSupport.stream(response.getIndices().get(indexMetaData.getIndex().getName()).spliterator(), false)
                        .filter(iss -> Arrays.stream(iss.getShards()).anyMatch(p -> p.getSegments().size() > maxNumSegments))
                        .count();
                listener.onResponse(numberShardsLeftToMerge == 0, new Info(numberShardsLeftToMerge));
            }, listener::onFailure));
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), maxNumSegments);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        SegmentCountStep other = (SegmentCountStep) obj;
        // direct int comparison: Objects.equals would autobox both operands
        return super.equals(obj)
            && maxNumSegments == other.maxNumSegments;
    }

    /** Progress information surfaced while shards still need merging. */
    public static class Info implements ToXContentObject {

        private final long numberShardsLeftToMerge;

        static final ParseField SHARDS_TO_MERGE = new ParseField("shards_left_to_merge");
        static final ParseField MESSAGE = new ParseField("message");
        static final ConstructingObjectParser<Info, Void> PARSER = new ConstructingObjectParser<>("segment_count_step_info",
            a -> new Info((long) a[0]));
        static {
            PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_MERGE);
            // "message" is derived, so it is parsed but deliberately discarded
            PARSER.declareString((i, s) -> {}, MESSAGE);
        }

        public Info(long numberShardsLeftToMerge) {
            this.numberShardsLeftToMerge = numberShardsLeftToMerge;
        }

        public long getNumberShardsLeftToMerge() {
            return numberShardsLeftToMerge;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(MESSAGE.getPreferredName(),
                "Waiting for [" + numberShardsLeftToMerge + "] shards " + "to forcemerge");
            builder.field(SHARDS_TO_MERGE.getPreferredName(), numberShardsLeftToMerge);
            builder.endObject();
            return builder;
        }

        @Override
        public int hashCode() {
            return Objects.hash(numberShardsLeftToMerge);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            Info other = (Info) obj;
            // direct long comparison avoids autoboxing through Objects.equals
            return numberShardsLeftToMerge == other.numberShardsLeftToMerge;
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

/**
 * An {@link AsyncActionStep} that pins the index onto a single randomly chosen node
 * which the current allocation-filtering rules accept, by writing an
 * {@code index.routing.allocation.require._id} setting.
 */
public class SetSingleNodeAllocateStep extends AsyncActionStep {
    public static final String NAME = "set-single-node-allocation";

    // Only allocation filtering is consulted when deciding node eligibility.
    private static final AllocationDeciders ALLOCATION_DECIDERS = new AllocationDeciders(Settings.EMPTY, Collections.singletonList(
        new FilterAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))));

    public SetSingleNodeAllocateStep(StepKey key, StepKey nextStepKey, Client client) {
        super(key, nextStepKey, client);
    }

    @Override
    public void performAction(IndexMetaData indexMetaData, ClusterState clusterState, Listener listener) {
        RoutingAllocation allocation = new RoutingAllocation(ALLOCATION_DECIDERS, clusterState.getRoutingNodes(), clusterState, null,
            System.nanoTime());
        Optional<ShardRouting> anyShard = clusterState.getRoutingTable().allShards(indexMetaData.getIndex().getName()).stream().findAny();
        if (anyShard.isPresent() == false) {
            // There are no shards for the index, the index might be gone
            listener.onFailure(new IndexNotFoundException(indexMetaData.getIndex()));
            return;
        }
        // Collect every node the sampled shard would be allowed to remain on under
        // the current allocation rules.
        List<String> validNodeIds = new ArrayList<>();
        for (RoutingNode node : clusterState.getRoutingNodes()) {
            Decision decision = ALLOCATION_DECIDERS.canRemain(anyShard.get(), node, allocation);
            if (decision.type() == Decision.Type.YES) {
                DiscoveryNode discoveryNode = node.node();
                validNodeIds.add(discoveryNode.getId());
            }
        }
        // Shuffle the candidates so the node we settle on is random.
        Randomness.shuffle(validNodeIds);
        Optional<String> nodeId = validNodeIds.stream().findAny();
        if (nodeId.isPresent()) {
            Settings settings = Settings.builder()
                .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", nodeId.get()).build();
            UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indexMetaData.getIndex().getName())
                .settings(settings);
            getClient().admin().indices().updateSettings(updateSettingsRequest,
                ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure));
        } else {
            // No nodes currently match the allocation rules so just wait until there is one that does
            listener.onResponse(false);
        }
    }

}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * A {@link LifecycleAction} which shrinks the index into a new index whose name is
 * the original name prefixed with {@link #SHRUNKEN_INDEX_PREFIX}, then swaps the
 * original name onto the shrunken index as an alias.
 */
public class ShrinkAction implements LifecycleAction {
    public static final String NAME = "shrink";
    public static final String SHRUNKEN_INDEX_PREFIX = "shrink-";
    public static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards");

    private static final ConstructingObjectParser<ShrinkAction, Void> PARSER =
        new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0]));

    static {
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD);
    }

    // Target primary-shard count of the shrunken index; immutable once constructed.
    private final int numberOfShards;

    public static ShrinkAction parse(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    /**
     * @throws IllegalArgumentException if {@code numberOfShards} is not positive
     */
    public ShrinkAction(int numberOfShards) {
        if (numberOfShards <= 0) {
            throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
        }
        this.numberOfShards = numberOfShards;
    }

    public ShrinkAction(StreamInput in) throws IOException {
        this.numberOfShards = in.readVInt();
    }

    int getNumberOfShards() {
        return numberOfShards;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(numberOfShards);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
        builder.endObject();
        return builder;
    }

    @Override
    public boolean isSafeAction() {
        return false;
    }

    @Override
    public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
        Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build();

        StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME);
        StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME);
        StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME);
        StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME);
        StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME);
        StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
        StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME);
        StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME);

        // Mark read-only, colocate all shards on one node, shrink, wait for the new
        // shards, copy lifecycle state, swap the alias, then verify the takeover.
        UpdateSettingsStep readOnlyStep = new UpdateSettingsStep(readOnlyKey, setSingleNodeKey, client, readOnlySettings);
        SetSingleNodeAllocateStep setSingleNodeStep = new SetSingleNodeAllocateStep(setSingleNodeKey, allocationRoutedKey, client);
        AllocationRoutedStep allocationStep = new AllocationRoutedStep(allocationRoutedKey, shrinkKey, false);
        ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, SHRUNKEN_INDEX_PREFIX);
        ShrunkShardsAllocatedStep allocated = new ShrunkShardsAllocatedStep(enoughShardsKey, copyMetadataKey, SHRUNKEN_INDEX_PREFIX);
        CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX);
        ShrinkSetAliasStep aliasSwapAndDelete = new ShrinkSetAliasStep(aliasKey, isShrunkIndexKey, client, SHRUNKEN_INDEX_PREFIX);
        ShrunkenIndexCheckStep waitOnShrinkTakeover = new ShrunkenIndexCheckStep(isShrunkIndexKey, nextStepKey, SHRUNKEN_INDEX_PREFIX);
        return Arrays.asList(readOnlyStep, setSingleNodeStep, allocationStep, shrink, allocated, copyMetadata,
            aliasSwapAndDelete, waitOnShrinkTakeover);
    }

    @Override
    public List<StepKey> toStepKeys(String phase) {
        StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME);
        StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME);
        StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME);
        StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME);
        StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME);
        StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
        StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME);
        StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME);
        return Arrays.asList(readOnlyKey, setSingleNodeKey, allocationRoutedKey, shrinkKey, enoughShardsKey,
            copyMetadataKey, aliasKey, isShrunkIndexKey);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ShrinkAction that = (ShrinkAction) o;
        // direct int comparison: Objects.equals would autobox both operands
        return numberOfShards == that.numberOfShards;
    }

    @Override
    public int hashCode() {
        return Objects.hash(numberOfShards);
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

import java.util.Objects;

/**
 * An {@link AsyncActionStep} that, in a single atomic aliases request, deletes the
 * original index and adds its name as an alias on the shrunken index.
 */
public class ShrinkSetAliasStep extends AsyncActionStep {
    public static final String NAME = "aliases";

    // Prefix used to derive the shrunken index name from the original one.
    private final String shrunkIndexPrefix;

    public ShrinkSetAliasStep(StepKey key, StepKey nextStepKey, Client client, String shrunkIndexPrefix) {
        super(key, nextStepKey, client);
        this.shrunkIndexPrefix = shrunkIndexPrefix;
    }

    String getShrunkIndexPrefix() {
        return shrunkIndexPrefix;
    }

    @Override
    public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) {
        // get source index
        String index = indexMetaData.getIndex().getName();
        // get target shrink index
        String targetIndexName = shrunkIndexPrefix + index;

        // removeIndex deletes the source; adding the alias makes the shrunken index
        // reachable under the old name. Both happen in one atomic request.
        IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest()
            .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(index))
            .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndexName).alias(index));

        getClient().admin().indices().aliases(aliasesRequest, ActionListener.wrap(response ->
            listener.onResponse(true), listener::onFailure));
    }

    @Override
    public boolean indexSurvives() {
        // The source index is deleted by this step.
        return false;
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), shrunkIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        ShrinkSetAliasStep other = (ShrinkSetAliasStep) obj;
        return super.equals(obj) &&
            Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix);
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

import java.util.Objects;

/**
 * An {@link AsyncActionStep} that issues the resize (shrink) request creating the
 * target index with the configured number of shards, copying the source's aliases
 * and lifecycle policy onto it.
 */
public class ShrinkStep extends AsyncActionStep {
    public static final String NAME = "shrink";

    // Target primary-shard count for the shrunken index.
    private final int numberOfShards;
    // Prefix prepended to the source index name to form the shrunken index name.
    private final String shrunkIndexPrefix;

    public ShrinkStep(StepKey key, StepKey nextStepKey, Client client, int numberOfShards, String shrunkIndexPrefix) {
        super(key, nextStepKey, client);
        this.numberOfShards = numberOfShards;
        this.shrunkIndexPrefix = shrunkIndexPrefix;
    }

    public int getNumberOfShards() {
        return numberOfShards;
    }

    String getShrunkIndexPrefix() {
        return shrunkIndexPrefix;
    }

    @Override
    public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) {
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData);
        // The lifecycle date must already have been recorded (by the rollover/init steps).
        if (lifecycleState.getLifecycleDate() == null) {
            throw new IllegalStateException("source index [" + indexMetaData.getIndex().getName() +
                "] is missing lifecycle date");
        }

        String lifecycle = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetaData.getSettings());

        Settings relevantTargetSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, indexMetaData.getNumberOfReplicas())
            .put(LifecycleSettings.LIFECYCLE_NAME, lifecycle)
            .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null) // need to remove the single shard
            // allocation so replicas can be allocated
            .build();

        String shrunkenIndexName = shrunkIndexPrefix + indexMetaData.getIndex().getName();
        ResizeRequest resizeRequest = new ResizeRequest(shrunkenIndexName, indexMetaData.getIndex().getName());
        resizeRequest.setCopySettings(true);
        // Carry all of the source's aliases over to the shrunken index.
        indexMetaData.getAliases().values().spliterator().forEachRemaining(aliasMetaDataObjectCursor -> {
            resizeRequest.getTargetIndexRequest().alias(new Alias(aliasMetaDataObjectCursor.value.alias()));
        });
        resizeRequest.getTargetIndexRequest().settings(relevantTargetSettings);

        getClient().admin().indices().resizeIndex(resizeRequest, ActionListener.wrap(response -> {
            // TODO(talevy): when is this not acknowledged?
            listener.onResponse(response.isAcknowledged());
        }, listener::onFailure));

    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), numberOfShards, shrunkIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        ShrinkStep other = (ShrinkStep) obj;
        // direct int comparison for numberOfShards: Objects.equals would autobox
        return super.equals(obj) &&
            numberOfShards == other.numberOfShards &&
            Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix);
    }

}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.Objects;

/**
 * A {@link ClusterStateWaitStep} that waits until the shrunken index exists and all
 * of its shard copies are active.
 */
public class ShrunkShardsAllocatedStep extends ClusterStateWaitStep {
    public static final String NAME = "shrunk-shards-allocated";

    // Prefix used to derive the shrunken index name from the original one.
    private final String shrunkIndexPrefix;

    public ShrunkShardsAllocatedStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) {
        super(key, nextStepKey);
        this.shrunkIndexPrefix = shrunkIndexPrefix;
    }

    String getShrunkIndexPrefix() {
        return shrunkIndexPrefix;
    }

    @Override
    public Result isConditionMet(Index index, ClusterState clusterState) {
        // We only want to make progress if all shards of the shrunk index are active.
        // Look the shrunken index up once instead of repeating the metadata query.
        String shrunkenIndexName = shrunkIndexPrefix + index.getName();
        IndexMetaData shrunkIndexMetaData = clusterState.metaData().index(shrunkenIndexName);
        if (shrunkIndexMetaData == null) {
            return new Result(false, new Info(false, -1, false));
        }
        boolean allShardsActive = ActiveShardCount.ALL.enoughShardsActive(clusterState, shrunkenIndexName);
        if (allShardsActive) {
            return new Result(true, null);
        }
        return new Result(false, new Info(true, shrunkIndexMetaData.getNumberOfShards(), false));
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), shrunkIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        ShrunkShardsAllocatedStep other = (ShrunkShardsAllocatedStep) obj;
        return super.equals(obj) && Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix);
    }

    /** Progress information surfaced while the shrunken index is not yet fully active. */
    public static final class Info implements ToXContentObject {

        private final int actualShards;
        private final boolean shrunkIndexExists;
        private final boolean allShardsActive;
        private final String message;

        static final ParseField ACTUAL_SHARDS = new ParseField("actual_shards");
        static final ParseField SHRUNK_INDEX_EXISTS = new ParseField("shrunk_index_exists");
        static final ParseField ALL_SHARDS_ACTIVE = new ParseField("all_shards_active");
        static final ParseField MESSAGE = new ParseField("message");
        static final ConstructingObjectParser<Info, Void> PARSER = new ConstructingObjectParser<>("shrunk_shards_allocated_step_info",
            a -> new Info((boolean) a[0], (int) a[1], (boolean) a[2]));
        static {
            PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SHRUNK_INDEX_EXISTS);
            PARSER.declareInt(ConstructingObjectParser.constructorArg(), ACTUAL_SHARDS);
            PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ALL_SHARDS_ACTIVE);
            // "message" is derived from the other fields, so it is parsed but discarded
            PARSER.declareString((i, s) -> {}, MESSAGE);
        }

        public Info(boolean shrunkIndexExists, int actualShards, boolean allShardsActive) {
            this.actualShards = actualShards;
            this.shrunkIndexExists = shrunkIndexExists;
            this.allShardsActive = allShardsActive;
            if (shrunkIndexExists == false) {
                message = "Waiting for shrunk index to be created";
            } else if (allShardsActive == false) {
                message = "Waiting for all shard copies to be active";
            } else {
                message = "";
            }
        }

        public int getActualShards() {
            return actualShards;
        }

        public boolean shrunkIndexExists() {
            return shrunkIndexExists;
        }

        public boolean allShardsActive() {
            return allShardsActive;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(MESSAGE.getPreferredName(), message);
            builder.field(SHRUNK_INDEX_EXISTS.getPreferredName(), shrunkIndexExists);
            builder.field(ACTUAL_SHARDS.getPreferredName(), actualShards);
            builder.field(ALL_SHARDS_ACTIVE.getPreferredName(), allShardsActive);
            builder.endObject();
            return builder;
        }

        @Override
        public int hashCode() {
            return Objects.hash(shrunkIndexExists, actualShards, allShardsActive);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            Info other = (Info) obj;
            // direct primitive comparisons: Objects.equals would autobox each operand
            return shrunkIndexExists == other.shrunkIndexExists &&
                actualShards == other.actualShards &&
                allShardsActive == other.allShardsActive;
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.Objects;

/**
 * A {@link ClusterStateWaitStep} that verifies a shrink operation has fully
 * completed: the index being processed must itself be the shrunken copy (its
 * name is the configured prefix plus the source index name) and the original
 * source index must already have been deleted from the cluster state.
 */
public class ShrunkenIndexCheckStep extends ClusterStateWaitStep {
    public static final String NAME = "is-shrunken-index";
    // Fixed: logger was created with InitializePolicyContextStep.class (copy/paste
    // error), which mis-attributed every log line emitted by this step.
    private static final Logger logger = LogManager.getLogger(ShrunkenIndexCheckStep.class);
    // Fixed: field is never reassigned after construction, so it is now final.
    private final String shrunkIndexPrefix;

    public ShrunkenIndexCheckStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) {
        super(key, nextStepKey);
        this.shrunkIndexPrefix = shrunkIndexPrefix;
    }

    String getShrunkIndexPrefix() {
        return shrunkIndexPrefix;
    }

    @Override
    public Result isConditionMet(Index index, ClusterState clusterState) {
        IndexMetaData idxMeta = clusterState.getMetaData().index(index);
        if (idxMeta == null) {
            logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists",
                getKey().getAction(), index.getName());
            // Index must have been since deleted, ignore it
            return new Result(false, null);
        }
        // Reuse idxMeta rather than looking the same index up in the cluster state again.
        String shrunkenIndexSource = IndexMetaData.INDEX_SHRINK_SOURCE_NAME.get(idxMeta.getSettings());
        if (Strings.isNullOrEmpty(shrunkenIndexSource)) {
            throw new IllegalStateException("step[" + NAME + "] is checking an un-shrunken index[" + index.getName() + "]");
        }
        boolean isConditionMet = index.getName().equals(shrunkIndexPrefix + shrunkenIndexSource) &&
            clusterState.metaData().index(shrunkenIndexSource) == null;
        if (isConditionMet) {
            return new Result(true, null);
        }
        return new Result(false, new Info(shrunkenIndexSource));
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), shrunkIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        ShrunkenIndexCheckStep other = (ShrunkenIndexCheckStep) obj;
        return super.equals(obj) &&
            Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix);
    }

    /** Explains why the wait condition is not met yet (returned in explain output). */
    public static final class Info implements ToXContentObject {

        private final String originalIndexName;
        private final String message;

        static final ParseField ORIGINAL_INDEX_NAME = new ParseField("original_index_name");
        static final ParseField MESSAGE = new ParseField("message");
        static final ConstructingObjectParser<Info, Void> PARSER = new ConstructingObjectParser<>(
            "shrunken_index_check_step_info", a -> new Info((String) a[0]));
        static {
            PARSER.declareString(ConstructingObjectParser.constructorArg(), ORIGINAL_INDEX_NAME);
            // "message" is derived from the index name, so it is parsed and discarded.
            PARSER.declareString((i, s) -> {}, MESSAGE);
        }

        public Info(String originalIndexName) {
            this.originalIndexName = originalIndexName;
            this.message = "Waiting for original index [" + originalIndexName + "] to be deleted";
        }

        public String getOriginalIndexName() {
            return originalIndexName;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(MESSAGE.getPreferredName(), message);
            builder.field(ORIGINAL_INDEX_NAME.getPreferredName(), originalIndexName);
            builder.endObject();
            return builder;
        }

        @Override
        public int hashCode() {
            return Objects.hash(originalIndexName);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            Info other = (Info) obj;
            return Objects.equals(originalIndexName, other.originalIndexName);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }
}

// ======================================================================
// StartILMRequest.java
// ======================================================================
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;

/**
 * Request to start the Index Lifecycle Management feature. It carries no
 * payload, so all instances compare equal.
 */
public class StartILMRequest extends AcknowledgedRequest {

    public StartILMRequest() {
    }

    @Override
    public ActionRequestValidationException validate() {
        // Nothing to validate: the request has no fields.
        return null;
    }

    @Override
    public int hashCode() {
        // Arbitrary fixed constant, consistent with equals() treating all
        // instances of this payload-free request as equal.
        return 64;
    }

    @Override
    public boolean equals(Object obj) {
        return obj != null && obj.getClass() == getClass();
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * A single step in the execution of an index lifecycle policy. Each step is
 * identified by a {@link StepKey} (phase / action / step name) and records the
 * key of the step that should run after it ({@code null} for a terminal step).
 *
 * Fixed: the previous javadoc ("A LifecycleAction which deletes the index.")
 * was copy/pasted from an action class and described the wrong type.
 */
public abstract class Step {
    private final StepKey key;
    private final StepKey nextStepKey;

    public Step(StepKey key, StepKey nextStepKey) {
        this.key = key;
        this.nextStepKey = nextStepKey;
    }

    public final StepKey getKey() {
        return key;
    }

    /** The key of the step to execute after this one, or {@code null} if none. */
    public final StepKey getNextStepKey() {
        return nextStepKey;
    }

    @Override
    public int hashCode() {
        return Objects.hash(key, nextStepKey);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Step other = (Step) obj;
        return Objects.equals(key, other.key) &&
            Objects.equals(nextStepKey, other.nextStepKey);
    }

    @Override
    public String toString() {
        return key + " => " + nextStepKey;
    }

    /**
     * Uniquely identifies a step within a policy: the phase and action it
     * belongs to plus the step's own name. Serializable over the wire and as
     * XContent.
     */
    public static final class StepKey implements Writeable, ToXContentObject {
        private final String phase;
        private final String action;
        private final String name;

        public static final ParseField PHASE_FIELD = new ParseField("phase");
        public static final ParseField ACTION_FIELD = new ParseField("action");
        public static final ParseField NAME_FIELD = new ParseField("name");
        private static final ConstructingObjectParser<StepKey, Void> PARSER =
            new ConstructingObjectParser<>("stepkey", a -> new StepKey((String) a[0], (String) a[1], (String) a[2]));
        static {
            PARSER.declareString(ConstructingObjectParser.constructorArg(), PHASE_FIELD);
            PARSER.declareString(ConstructingObjectParser.constructorArg(), ACTION_FIELD);
            PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD);
        }

        public StepKey(String phase, String action, String name) {
            this.phase = phase;
            this.action = action;
            this.name = name;
        }

        public StepKey(StreamInput in) throws IOException {
            this.phase = in.readString();
            this.action = in.readString();
            this.name = in.readString();
        }

        public static StepKey parse(XContentParser parser) {
            return PARSER.apply(parser, null);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(phase);
            out.writeString(action);
            out.writeString(name);
        }

        public String getPhase() {
            return phase;
        }

        public String getAction() {
            return action;
        }

        public String getName() {
            return name;
        }

        @Override
        public int hashCode() {
            return Objects.hash(phase, action, name);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            StepKey other = (StepKey) obj;
            return Objects.equals(phase, other.phase) &&
                Objects.equals(action, other.action) &&
                Objects.equals(name, other.name);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(PHASE_FIELD.getPreferredName(), phase);
            builder.field(ACTION_FIELD.getPreferredName(), action);
            builder.field(NAME_FIELD.getPreferredName(), name);
            builder.endObject();
            return builder;
        }
    }
}
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java new file mode 100644 index 0000000000000..3a2d458406b30 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class StopILMRequest extends AcknowledgedRequest { + + public StopILMRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 75; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java new file mode 100644 index 0000000000000..4ba1b4fd83c60 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +public class TerminalPolicyStep extends Step { + public static final String COMPLETED_PHASE = "completed"; + public static final StepKey KEY = new StepKey(COMPLETED_PHASE, "completed", "completed"); + public static final TerminalPolicyStep INSTANCE = new TerminalPolicyStep(KEY, null); + + TerminalPolicyStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java new file mode 100644 index 0000000000000..17c9eaf17c083 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. A + * {@link TimeseriesLifecycleType} is made up of a set of {@link Phase}s which it will + * move through. 
Soon we will constrain the phases using some kinda of lifecycle + * type which will allow only particular {@link Phase}s to be defined, will + * dictate the order in which the {@link Phase}s are executed and will define + * which {@link LifecycleAction}s are allowed in each phase. + */ +public class TimeseriesLifecycleType implements LifecycleType { + public static final TimeseriesLifecycleType INSTANCE = new TimeseriesLifecycleType(); + + public static final String TYPE = "timeseries"; + static final List VALID_PHASES = Arrays.asList("hot", "warm", "cold", "delete"); + static final List ORDERED_VALID_HOT_ACTIONS = Collections.singletonList(RolloverAction.NAME); + static final List ORDERED_VALID_WARM_ACTIONS = Arrays.asList(ReadOnlyAction.NAME, AllocateAction.NAME, + ShrinkAction.NAME, ForceMergeAction.NAME); + static final List ORDERED_VALID_COLD_ACTIONS = Arrays.asList(AllocateAction.NAME); + static final List ORDERED_VALID_DELETE_ACTIONS = Arrays.asList(DeleteAction.NAME); + static final Set VALID_HOT_ACTIONS = Sets.newHashSet(ORDERED_VALID_HOT_ACTIONS); + static final Set VALID_WARM_ACTIONS = Sets.newHashSet(ORDERED_VALID_WARM_ACTIONS); + static final Set VALID_COLD_ACTIONS = Sets.newHashSet(ORDERED_VALID_COLD_ACTIONS); + static final Set VALID_DELETE_ACTIONS = Sets.newHashSet(ORDERED_VALID_DELETE_ACTIONS); + private static final Phase EMPTY_WARM_PHASE = new Phase("warm", TimeValue.ZERO, + Collections.singletonMap("readonly", ReadOnlyAction.INSTANCE)); + private static Map> ALLOWED_ACTIONS = new HashMap<>(); + + static { + ALLOWED_ACTIONS.put("hot", VALID_HOT_ACTIONS); + ALLOWED_ACTIONS.put("warm", VALID_WARM_ACTIONS); + ALLOWED_ACTIONS.put("cold", VALID_COLD_ACTIONS); + ALLOWED_ACTIONS.put("delete", VALID_DELETE_ACTIONS); + } + + private TimeseriesLifecycleType() { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public List getOrderedPhases(Map phases) 
{ + List orderedPhases = new ArrayList<>(VALID_PHASES.size()); + for (String phaseName : VALID_PHASES) { + Phase phase = phases.get(phaseName); + if (phase != null) { + orderedPhases.add(phase); + } + } + return orderedPhases; + } + + @Override + public String getNextPhaseName(String currentPhaseName, Map phases) { + int index = VALID_PHASES.indexOf(currentPhaseName); + if (index < 0 && "new".equals(currentPhaseName) == false) { + throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]"); + } else { + // Find the next phase after `index` that exists in `phases` and return it + while (++index < VALID_PHASES.size()) { + String phaseName = VALID_PHASES.get(index); + if (phases.containsKey(phaseName)) { + return phaseName; + } + } + // if we have exhausted VALID_PHASES and haven't found a matching + // phase in `phases` return null indicating there is no next phase + // available + return null; + } + } + + public String getPreviousPhaseName(String currentPhaseName, Map phases) { + if ("new".equals(currentPhaseName)) { + return null; + } + int index = VALID_PHASES.indexOf(currentPhaseName); + if (index < 0) { + throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]"); + } else { + // Find the previous phase before `index` that exists in `phases` and return it + while (--index >=0) { + String phaseName = VALID_PHASES.get(index); + if (phases.containsKey(phaseName)) { + return phaseName; + } + } + // if we have exhausted VALID_PHASES and haven't found a matching + // phase in `phases` return null indicating there is no previous phase + // available + return null; + } + } + + public List getOrderedActions(Phase phase) { + Map actions = phase.getActions(); + switch (phase.getName()) { + case "hot": + return ORDERED_VALID_HOT_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case 
"warm": + return ORDERED_VALID_WARM_ACTIONS.stream() .map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case "cold": + return ORDERED_VALID_COLD_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case "delete": + return ORDERED_VALID_DELETE_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + default: + throw new IllegalArgumentException("lifecycle type[" + TYPE + "] does not support phase[" + phase.getName() + "]"); + } + } + + @Override + public String getNextActionName(String currentActionName, Phase phase) { + List orderedActionNames; + switch (phase.getName()) { + case "hot": + orderedActionNames = ORDERED_VALID_HOT_ACTIONS; + break; + case "warm": + orderedActionNames = ORDERED_VALID_WARM_ACTIONS; + break; + case "cold": + orderedActionNames = ORDERED_VALID_COLD_ACTIONS; + break; + case "delete": + orderedActionNames = ORDERED_VALID_DELETE_ACTIONS; + break; + default: + throw new IllegalArgumentException("lifecycle type[" + TYPE + "] does not support phase[" + phase.getName() + "]"); + } + + int index = orderedActionNames.indexOf(currentActionName); + if (index < 0) { + throw new IllegalArgumentException("[" + currentActionName + "] is not a valid action for phase [" + phase.getName() + + "] in lifecycle type [" + TYPE + "]"); + } else { + // Find the next action after `index` that exists in the phase and return it + while (++index < orderedActionNames.size()) { + String actionName = orderedActionNames.get(index); + if (phase.getActions().containsKey(actionName)) { + return actionName; + } + } + // if we have exhausted `validActions` and haven't found a matching + // action in the Phase return null indicating there is no next + // action available + return null; + } + } + + @Override + public void validate(Collection phases) { + phases.forEach(phase -> { + if 
(ALLOWED_ACTIONS.containsKey(phase.getName()) == false) { + throw new IllegalArgumentException("Timeseries lifecycle does not support phase [" + phase.getName() + "]"); + } + phase.getActions().forEach((actionName, action) -> { + if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) { + throw new IllegalArgumentException("invalid action [" + actionName + "] " + + "defined in phase [" + phase.getName() +"]"); + } + }); + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java new file mode 100644 index 0000000000000..9d1c7701faa5a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.Index;

import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;

/**
 * After a rollover, records the rollover time as the lifecycle "index creation
 * date" in the index's ILM custom metadata, so subsequent age-based conditions
 * are measured from the rollover rather than from physical index creation.
 */
public class UpdateRolloverLifecycleDateStep extends ClusterStateActionStep {
    public static final String NAME = "update-rollover-lifecycle-date";

    public UpdateRolloverLifecycleDateStep(StepKey key, StepKey nextStepKey) {
        super(key, nextStepKey);
    }

    @Override
    public ClusterState performAction(Index index, ClusterState currentState) {
        IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
        // find the newly created index from the rollover and fetch its index.creation_date
        String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings());
        if (Strings.isNullOrEmpty(rolloverAlias)) {
            throw new IllegalStateException("setting [" + RolloverAction.LIFECYCLE_ROLLOVER_ALIAS
                + "] is not set on index [" + indexMetaData.getIndex().getName() + "]");
        }
        RolloverInfo rolloverInfo = indexMetaData.getRolloverInfos().get(rolloverAlias);
        if (rolloverInfo == null) {
            throw new IllegalStateException("index [" + indexMetaData.getIndex().getName() + "] has not rolled over yet");
        }

        // Copy the existing execution state and stamp the rollover time onto it.
        LifecycleExecutionState.Builder updatedState = LifecycleExecutionState
            .builder(LifecycleExecutionState.fromIndexMetadata(indexMetaData));
        updatedState.setIndexCreationDate(rolloverInfo.getTime());

        IndexMetaData.Builder updatedIndexMetadata = IndexMetaData.builder(indexMetaData);
        updatedIndexMetadata.putCustom(ILM_CUSTOM_METADATA_KEY, updatedState.build().asMap());
        return ClusterState.builder(currentState)
            .metaData(MetaData.builder(currentState.metaData()).put(updatedIndexMetadata))
            .build();
    }

    @Override
    public int hashCode() {
        return super.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        return obj != null && getClass() == obj.getClass() && super.equals(obj);
    }
}

// ======================================================================
// UpdateSettingsStep.java
// ======================================================================
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

import java.util.Objects;

/**
 * Asynchronously applies a fixed block of index settings to the index being
 * processed.
 */
public class UpdateSettingsStep extends AsyncActionStep {
    public static final String NAME = "update-settings";

    private final Settings settings;

    public UpdateSettingsStep(StepKey key, StepKey nextStepKey, Client client, Settings settings) {
        super(key, nextStepKey, client);
        this.settings = settings;
    }

    @Override
    public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) {
        String indexName = indexMetaData.getIndex().getName();
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indexName).settings(settings);
        // Only success/failure matters to the listener; the response body is discarded.
        getClient().admin().indices().updateSettings(updateSettingsRequest,
            ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure));
    }

    public Settings getSettings() {
        return settings;
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), settings);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        UpdateSettingsStep other = (UpdateSettingsStep) obj;
        return super.equals(obj) && Objects.equals(settings, other.settings);
    }
}
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;

import java.io.IOException;
import java.util.Objects;

/**
 * Transport action for deleting an index lifecycle policy by name.
 */
public class DeleteLifecycleAction
        extends Action {
    public static final DeleteLifecycleAction INSTANCE = new DeleteLifecycleAction();
    public static final String NAME = "cluster:admin/ilm/delete";

    protected DeleteLifecycleAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    /** Simple acknowledged/not-acknowledged response. */
    public static class Response extends AcknowledgedResponse implements ToXContentObject {

        public Response() {
        }

        public Response(boolean acknowledged) {
            super(acknowledged);
        }
    }

    /** Carries the name of the policy to delete. */
    public static class Request extends AcknowledgedRequest {

        public static final ParseField POLICY_FIELD = new ParseField("policy");

        private String policyName;

        public Request(String policyName) {
            this.policyName = policyName;
        }

        public Request() {
        }

        public String getPolicyName() {
            return policyName;
        }

        public void setPolicyName(final String policyName) {
            this.policyName = policyName;
        }

        @Override
        public ActionRequestValidationException validate() {
            // Name presence is enforced server-side; nothing to validate here.
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            policyName = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(policyName);
        }

        @Override
        public int hashCode() {
            return Objects.hash(policyName);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.equals(policyName, other.policyName);
        }

    }

    @Override
    public DeleteLifecycleActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new DeleteLifecycleActionRequestBuilder(client, INSTANCE);
    }

}
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteLifecycleActionRequestBuilder + extends ActionRequestBuilder { + + public DeleteLifecycleActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new DeleteLifecycleAction.Request()); + } + + public DeleteLifecycleActionRequestBuilder setPolicyName(final String policyName) { + request.setPolicyName(policyName); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java new file mode 100644 index 0000000000000..8097f7c0f979d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.ElasticsearchClient; + +public class ExplainLifecycleAction + extends Action { + public static final ExplainLifecycleAction INSTANCE = new ExplainLifecycleAction(); + public static final String NAME = "indices:admin/ilm/explain"; + + protected ExplainLifecycleAction() { + super(NAME); + } + + @Override + public ExplainLifecycleResponse newResponse() { + return new ExplainLifecycleResponse(); + } + + @Override + public ExplainLifecycleActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) { + return new ExplainLifecycleActionRequestBuilder(client, INSTANCE); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleActionRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleActionRequestBuilder.java new file mode 100644 index 0000000000000..f4a92462d977e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleActionRequestBuilder.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; + +public class ExplainLifecycleActionRequestBuilder + extends ClusterInfoRequestBuilder { + + public ExplainLifecycleActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new ExplainLifecycleRequest()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java new file mode 100644 index 0000000000000..f8c47905c8e63 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * Transport action for retrieving one or more index lifecycle policies by name.
 */
public class GetLifecycleAction extends Action {
    public static final GetLifecycleAction INSTANCE = new GetLifecycleAction();
    public static final String NAME = "cluster:admin/ilm/get";

    protected GetLifecycleAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    /** Holds the matched policies, each with its version and modification date. */
    public static class Response extends ActionResponse implements ToXContentObject {

        // NOTE(review): left null by the no-arg constructor until readFrom runs;
        // toXContent/hashCode/equals would NPE on an unread instance — confirm
        // callers never use one.
        private List<LifecyclePolicyResponseItem> policies;

        public Response() {
        }

        public Response(List<LifecyclePolicyResponseItem> policies) {
            this.policies = policies;
        }

        public List<LifecyclePolicyResponseItem> getPolicies() {
            return policies;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            // One top-level object per policy, keyed by policy name.
            for (LifecyclePolicyResponseItem item : policies) {
                builder.startObject(item.getLifecyclePolicy().getName());
                builder.field("version", item.getVersion());
                builder.field("modified_date", item.getModifiedDate());
                builder.field("policy", item.getLifecyclePolicy());
                builder.endObject();
            }
            builder.endObject();
            return builder;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            // NOTE(review): does not call super.readFrom(in); writeTo is
            // symmetric (no super call), so the wire format is consistent —
            // confirm this matches ActionResponse's contract.
            this.policies = in.readList(LifecyclePolicyResponseItem::new);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeList(policies);
        }

        @Override
        public int hashCode() {
            return Objects.hash(policies);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Response other = (Response) obj;
            return Objects.equals(policies, other.policies);
        }

        @Override
        public String toString() {
            return Strings.toString(this, true, true);
        }

    }

    /** Names of the policies to fetch; empty means "all policies". */
    public static class Request extends AcknowledgedRequest {
        private String[] policyNames;

        public Request(String... policyNames) {
            if (policyNames == null) {
                // Fixed: message said "ids cannot be null" although the
                // parameter is policyNames (copy/paste from another action).
                throw new IllegalArgumentException("policyNames cannot be null");
            }
            this.policyNames = policyNames;
        }

        public Request() {
            policyNames = Strings.EMPTY_ARRAY;
        }

        public String[] getPolicyNames() {
            return policyNames;
        }

        public void setPolicyNames(final String[] policyNames) {
            this.policyNames = policyNames;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            policyNames = in.readStringArray();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeStringArray(policyNames);
        }

        @Override
        public int hashCode() {
            return Arrays.hashCode(policyNames);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Arrays.equals(policyNames, other.policyNames);
        }

    }

    @Override
    public GetLifecycleActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new GetLifecycleActionRequestBuilder(client, INSTANCE);
    }

    /** A policy plus the cluster-state metadata (version, modification date) about it. */
    public static class LifecyclePolicyResponseItem implements Writeable {
        private final LifecyclePolicy lifecyclePolicy;
        private final long version;
        private final String modifiedDate;

        public LifecyclePolicyResponseItem(LifecyclePolicy lifecyclePolicy, long version, String modifiedDate) {
            this.lifecyclePolicy = lifecyclePolicy;
            this.version = version;
            this.modifiedDate = modifiedDate;
        }

        LifecyclePolicyResponseItem(StreamInput in) throws IOException {
            this.lifecyclePolicy = new LifecyclePolicy(in);
            this.version = in.readVLong();
            this.modifiedDate = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            lifecyclePolicy.writeTo(out);
            out.writeVLong(version);
            out.writeString(modifiedDate);
        }

        public LifecyclePolicy getLifecyclePolicy() {
            return lifecyclePolicy;
        }

        public long getVersion() {
            return version;
        }

        public String getModifiedDate() {
            return modifiedDate;
        }

        @Override
        public int hashCode() {
            return Objects.hash(lifecyclePolicy, version, modifiedDate);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            LifecyclePolicyResponseItem other = (LifecyclePolicyResponseItem) obj;
            // Fixed: version is a primitive long; Objects.equals boxed both
            // sides needlessly — plain == is equivalent and allocation-free.
            return Objects.equals(lifecyclePolicy, other.lifecyclePolicy) &&
                version == other.version &&
                Objects.equals(modifiedDate, other.modifiedDate);
        }
    }
}
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetLifecycleActionRequestBuilder + extends ActionRequestBuilder { + + public GetLifecycleActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new GetLifecycleAction.Request()); + } + + public GetLifecycleActionRequestBuilder setPolicyNames(final String[] policyNames) { + request.setPolicyNames(policyNames); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java new file mode 100644 index 0000000000000..988342022731a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;

import java.io.IOException;
import java.util.Objects;

/**
 * Transport action that reports the current ILM {@link OperationMode}.
 */
public class GetStatusAction extends Action<GetStatusAction.Request, GetStatusAction.Response, GetStatusActionRequestBuilder> {

    public static final GetStatusAction INSTANCE = new GetStatusAction();
    public static final String NAME = "cluster:admin/ilm/operation_mode/get";

    protected GetStatusAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    @Override
    public GetStatusActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new GetStatusActionRequestBuilder(client, INSTANCE);
    }

    /**
     * Response carrying the current operation mode, rendered as {@code {"operation_mode": ...}}.
     */
    public static class Response extends ActionResponse implements ToXContentObject {

        private OperationMode mode;

        public Response() {
        }

        public Response(OperationMode mode) {
            this.mode = mode;
        }

        public OperationMode getMode() {
            return mode;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field("operation_mode", mode);
            builder.endObject();
            return builder;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            mode = in.readEnum(OperationMode.class);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeEnum(mode);
        }

        @Override
        public int hashCode() {
            return Objects.hash(mode);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Response other = (Response) obj;
            return Objects.equals(mode, other.mode);
        }

        @Override
        public String toString() {
            return Strings.toString(this, true, true);
        }

    }

    /**
     * Request with no parameters of its own; serialization is fully handled by the superclass,
     * so the redundant readFrom/writeTo overrides were dropped.
     */
    public static class Request extends AcknowledgedRequest<Request> {

        public Request() {
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }
    }

}
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetStatusActionRequestBuilder + extends ActionRequestBuilder { + + public GetStatusActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new GetStatusAction.Request()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java new file mode 100644 index 0000000000000..a122cc85d7699 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.Objects;

/**
 * Transport action that manually moves an index from its current lifecycle step
 * to another step. The current step must be supplied and is validated server-side
 * to guard against racing with ILM's own step transitions.
 */
public class MoveToStepAction extends Action<MoveToStepAction.Request, MoveToStepAction.Response, MoveToStepActionRequestBuilder> {

    public static final MoveToStepAction INSTANCE = new MoveToStepAction();
    public static final String NAME = "cluster:admin/ilm/_move/post";

    protected MoveToStepAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    @Override
    public MoveToStepActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new MoveToStepActionRequestBuilder(client, INSTANCE);
    }

    /** Simple acknowledged/not-acknowledged response. */
    public static class Response extends AcknowledgedResponse implements ToXContentObject {

        public Response() {
        }

        public Response(boolean acknowledged) {
            super(acknowledged);
        }
    }

    /**
     * Request naming the target index plus the expected current step and the desired next step.
     * The index name is taken from the REST path, so it is the parser's context argument and
     * is not part of the XContent body.
     */
    public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
        static final ParseField CURRENT_KEY_FIELD = new ParseField("current_step");
        static final ParseField NEXT_KEY_FIELD = new ParseField("next_step");
        private static final ConstructingObjectParser<Request, String> PARSER =
            new ConstructingObjectParser<>("move_to_step_request", false,
                (a, index) -> {
                    StepKey currentStepKey = (StepKey) a[0];
                    StepKey nextStepKey = (StepKey) a[1];
                    return new Request(index, currentStepKey, nextStepKey);
                });
        static {
            PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> StepKey.parse(p), CURRENT_KEY_FIELD);
            PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> StepKey.parse(p), NEXT_KEY_FIELD);
        }

        private String index;
        private StepKey currentStepKey;
        private StepKey nextStepKey;

        public Request(String index, StepKey currentStepKey, StepKey nextStepKey) {
            this.index = index;
            this.currentStepKey = currentStepKey;
            this.nextStepKey = nextStepKey;
        }

        public Request() {
        }

        public String getIndex() {
            return index;
        }

        public void setIndex(final String index) {
            this.index = index;
        }

        public StepKey getCurrentStepKey() {
            return currentStepKey;
        }

        public void setCurrentStepKey(final StepKey currentStepKey) {
            this.currentStepKey = currentStepKey;
        }

        public StepKey getNextStepKey() {
            return nextStepKey;
        }

        public void setNextStepKey(final StepKey nextStepKey) {
            this.nextStepKey = nextStepKey;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        public static Request parseRequest(String name, XContentParser parser) {
            return PARSER.apply(parser, name);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            this.index = in.readString();
            this.currentStepKey = new StepKey(in);
            this.nextStepKey = new StepKey(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(index);
            currentStepKey.writeTo(out);
            nextStepKey.writeTo(out);
        }

        @Override
        public int hashCode() {
            return Objects.hash(index, currentStepKey, nextStepKey);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.equals(index, other.index)
                    && Objects.equals(currentStepKey, other.currentStepKey)
                    && Objects.equals(nextStepKey, other.nextStepKey);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder.startObject()
                .field(CURRENT_KEY_FIELD.getPreferredName(), currentStepKey)
                .field(NEXT_KEY_FIELD.getPreferredName(), nextStepKey)
                .endObject();
        }
    }

}
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +public class MoveToStepActionRequestBuilder + extends ActionRequestBuilder { + + public MoveToStepActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new MoveToStepAction.Request()); + } + + public MoveToStepActionRequestBuilder setIndex(final String index) { + request.setIndex(index); + return this; + } + + public MoveToStepActionRequestBuilder setCurrentStepKey(final Step.StepKey currentStepKey) { + request.setCurrentStepKey(currentStepKey); + return this; + } + + public MoveToStepActionRequestBuilder setNextStepKey(final Step.StepKey nextStepKey) { + request.setNextStepKey(nextStepKey); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java new file mode 100644 index 0000000000000..527ea14140ad3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;

import java.io.IOException;
import java.util.Objects;

/**
 * Transport action that creates or updates an index lifecycle policy.
 */
public class PutLifecycleAction extends Action<PutLifecycleAction.Request, PutLifecycleAction.Response, PutLifecycleActionRequestBuilder> {

    public static final PutLifecycleAction INSTANCE = new PutLifecycleAction();
    public static final String NAME = "cluster:admin/ilm/put";

    protected PutLifecycleAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    @Override
    public PutLifecycleActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new PutLifecycleActionRequestBuilder(client, INSTANCE);
    }

    /** Simple acknowledged/not-acknowledged response. */
    public static class Response extends AcknowledgedResponse implements ToXContentObject {

        public Response() {
        }

        public Response(boolean acknowledged) {
            super(acknowledged);
        }
    }

    /**
     * Request wrapping the policy to store. The policy name comes from the REST path
     * and is passed to the parser as its context argument.
     */
    public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {

        public static final ParseField POLICY_FIELD = new ParseField("policy");
        private static final ConstructingObjectParser<Request, String> PARSER =
            new ConstructingObjectParser<>("put_lifecycle_request", a -> new Request((LifecyclePolicy) a[0]));
        static {
            PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY_FIELD);
        }

        private LifecyclePolicy policy;

        public Request(LifecyclePolicy policy) {
            this.policy = policy;
        }

        public Request() {
        }

        public LifecyclePolicy getPolicy() {
            return policy;
        }

        public void setPolicy(final LifecyclePolicy policy) {
            this.policy = policy;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        public static Request parseRequest(String name, XContentParser parser) {
            return PARSER.apply(parser, name);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(POLICY_FIELD.getPreferredName(), policy);
            builder.endObject();
            return builder;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            policy = new LifecyclePolicy(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            policy.writeTo(out);
        }

        @Override
        public int hashCode() {
            return Objects.hash(policy);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.equals(policy, other.policy);
        }

        @Override
        public String toString() {
            return Strings.toString(this, true, true);
        }

    }

}
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; + +public class PutLifecycleActionRequestBuilder + extends ActionRequestBuilder { + + public PutLifecycleActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new PutLifecycleAction.Request()); + } + + public PutLifecycleActionRequestBuilder setPolicy(LifecyclePolicy policy) { + request.setPolicy(policy); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..7058fa89c3e28 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * Transport action that detaches the lifecycle policy from one or more indices.
 * The response reports any indices for which removal failed.
 */
public class RemoveIndexLifecyclePolicyAction extends
        Action<RemoveIndexLifecyclePolicyAction.Request, RemoveIndexLifecyclePolicyAction.Response,
                RemovePolicyForIndexActionRequestBuilder> {

    public static final RemoveIndexLifecyclePolicyAction INSTANCE = new RemoveIndexLifecyclePolicyAction();
    public static final String NAME = "indices:admin/ilm/remove_policy";

    protected RemoveIndexLifecyclePolicyAction() {
        super(NAME);
    }

    @Override
    public RemoveIndexLifecyclePolicyAction.Response newResponse() {
        return new Response();
    }

    @Override
    public RemovePolicyForIndexActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new RemovePolicyForIndexActionRequestBuilder(client, INSTANCE);
    }

    /**
     * Response listing the indices whose policy could not be removed.
     * {@code has_failures} is derived from that list when serializing.
     */
    public static class Response extends ActionResponse implements ToXContentObject {

        public static final ParseField HAS_FAILURES_FIELD = new ParseField("has_failures");
        public static final ParseField FAILED_INDEXES_FIELD = new ParseField("failed_indexes");
        @SuppressWarnings("unchecked")
        public static final ConstructingObjectParser<Response, Void> PARSER = new ConstructingObjectParser<>(
                "change_policy_for_index_response", a -> new Response((List<String>) a[0]));
        static {
            PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FAILED_INDEXES_FIELD);
            // Needs to be declared but not used in constructing the response object
            PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), HAS_FAILURES_FIELD);
        }

        private List<String> failedIndexes;

        public Response() {
        }

        public Response(List<String> failedIndexes) {
            if (failedIndexes == null) {
                throw new IllegalArgumentException(FAILED_INDEXES_FIELD.getPreferredName() + " cannot be null");
            }
            this.failedIndexes = failedIndexes;
        }

        public List<String> getFailedIndexes() {
            return failedIndexes;
        }

        public boolean hasFailures() {
            return failedIndexes.isEmpty() == false;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(HAS_FAILURES_FIELD.getPreferredName(), hasFailures());
            builder.field(FAILED_INDEXES_FIELD.getPreferredName(), failedIndexes);
            builder.endObject();
            return builder;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            failedIndexes = in.readList(StreamInput::readString);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeStringList(failedIndexes);
        }

        @Override
        public int hashCode() {
            return Objects.hash(failedIndexes);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            Response other = (Response) obj;
            return Objects.equals(failedIndexes, other.failedIndexes);
        }

    }

    /**
     * Request naming the indices to detach the policy from; supports index-name
     * expression replacement via {@link IndicesRequest.Replaceable}.
     */
    public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest.Replaceable {

        private String[] indices;
        private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();

        public Request() {
        }

        public Request(String... indices) {
            if (indices == null) {
                throw new IllegalArgumentException("indices cannot be null");
            }
            this.indices = indices;
        }

        @Override
        public Request indices(String... indices) {
            this.indices = indices;
            return this;
        }

        @Override
        public String[] indices() {
            return indices;
        }

        public void indicesOptions(IndicesOptions indicesOptions) {
            this.indicesOptions = indicesOptions;
        }

        public IndicesOptions indicesOptions() {
            return indicesOptions;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            indices = in.readStringArray();
            indicesOptions = IndicesOptions.readIndicesOptions(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeStringArray(indices);
            indicesOptions.writeIndicesOptions(out);
        }

        @Override
        public int hashCode() {
            return Objects.hash(Arrays.hashCode(indices), indicesOptions);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.deepEquals(indices, other.indices) &&
                    Objects.equals(indicesOptions, other.indicesOptions);
        }

    }

}
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; + +public class RemovePolicyForIndexActionRequestBuilder + extends ActionRequestBuilder< + RemoveIndexLifecyclePolicyAction.Request, + RemoveIndexLifecyclePolicyAction.Response, + RemovePolicyForIndexActionRequestBuilder> { + + public RemovePolicyForIndexActionRequestBuilder( + final ElasticsearchClient client, + final Action< + RemoveIndexLifecyclePolicyAction.Request, + RemoveIndexLifecyclePolicyAction.Response, + RemovePolicyForIndexActionRequestBuilder> action) { + super(client, action, new RemoveIndexLifecyclePolicyAction.Request()); + } + + public RemovePolicyForIndexActionRequestBuilder setIndices(final String... indices) { + request.indices(indices); + return this; + } + + public RemovePolicyForIndexActionRequestBuilder setIndicesOptions(final IndicesOptions indicesOptions) { + request.indicesOptions(indicesOptions); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java new file mode 100644 index 0000000000000..29585b4efa579 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;

import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;

/**
 * Transport action that retries execution of the lifecycle step for indices
 * whose ILM execution is in an error state.
 */
public class RetryAction extends Action<RetryAction.Request, RetryAction.Response, RetryActionRequestBuilder> {

    public static final RetryAction INSTANCE = new RetryAction();
    public static final String NAME = "indices:admin/ilm/retry";

    protected RetryAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    @Override
    public RetryActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) {
        return new RetryActionRequestBuilder(client, INSTANCE);
    }

    /** Simple acknowledged/not-acknowledged response. */
    public static class Response extends AcknowledgedResponse implements ToXContentObject {

        public Response() {
        }

        public Response(boolean acknowledged) {
            super(acknowledged);
        }
    }

    /**
     * Request naming the indices to retry; defaults to no indices and
     * strict/open-expansion index options.
     */
    public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest.Replaceable {
        private String[] indices = Strings.EMPTY_ARRAY;
        private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();

        public Request(String... indices) {
            this.indices = indices;
        }

        public Request() {
        }

        @Override
        public Request indices(String... indices) {
            this.indices = indices;
            return this;
        }

        @Override
        public String[] indices() {
            return indices;
        }

        @Override
        public IndicesOptions indicesOptions() {
            return indicesOptions;
        }

        public Request indicesOptions(IndicesOptions indicesOptions) {
            this.indicesOptions = indicesOptions;
            return this;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            this.indices = in.readStringArray();
            this.indicesOptions = IndicesOptions.readIndicesOptions(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeStringArray(indices);
            indicesOptions.writeIndicesOptions(out);
        }

        @Override
        public int hashCode() {
            return Objects.hash(Arrays.hashCode(indices), indicesOptions);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.deepEquals(indices, other.indices)
                    && Objects.equals(indicesOptions, other.indicesOptions);
        }

    }

}
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; + +public class RetryActionRequestBuilder extends ActionRequestBuilder { + + public RetryActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new RetryAction.Request()); + } + + public RetryActionRequestBuilder setIndices(final String... indices) { + request.indices(indices); + return this; + } + + public RetryActionRequestBuilder setIndicesOptions(final IndicesOptions indicesOptions) { + request.indicesOptions(indicesOptions); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java new file mode 100644 index 0000000000000..c72344b36cfaa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; + +public class StartILMAction extends Action { + public static final StartILMAction INSTANCE = new StartILMAction(); + public static final String NAME = "cluster:admin/ilm/start"; + + protected StartILMAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + public StartILMActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) { + return new StartILMActionRequestBuilder(client, INSTANCE); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMActionRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMActionRequestBuilder.java new file mode 100644 index 0000000000000..fbf336295853f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMActionRequestBuilder.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; + +public class StartILMActionRequestBuilder + extends ActionRequestBuilder { + + public StartILMActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new StartILMRequest()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java new file mode 100644 index 0000000000000..854da203e6c14 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; + +public class StopILMAction extends Action { + public static final StopILMAction INSTANCE = new StopILMAction(); + public static final String NAME = "cluster:admin/ilm/stop"; + + protected StopILMAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + public StopILMActionRequestBuilder newRequestBuilder(final ElasticsearchClient client) { + return new StopILMActionRequestBuilder(client, INSTANCE); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMActionRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMActionRequestBuilder.java new file mode 100644 index 0000000000000..8b6cdb4dee67e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMActionRequestBuilder.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; + +public class StopILMActionRequestBuilder extends ActionRequestBuilder { + + public StopILMActionRequestBuilder( + final ElasticsearchClient client, + final Action action) { + super(client, action, new StopILMRequest()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java new file mode 100644 index 0000000000000..5e81d2e2aa7e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; + +/** + * A wrapper to elasticsearch clients that exposes all ILM related APIs + */ +public class ILMClient { + + private ElasticsearchClient client; + + public ILMClient(ElasticsearchClient client) { + this.client = client; + } + + /** + * Create or modify a lifecycle policy definition + */ + public void putLifecyclePolicy(PutLifecycleAction.Request request, ActionListener listener) { + client.execute(PutLifecycleAction.INSTANCE, request, listener); + } + + /** + * Create or modify a lifecycle policy definition + */ + public ActionFuture putLifecyclePolicy(PutLifecycleAction.Request request) { + return client.execute(PutLifecycleAction.INSTANCE, request); + } + + /** + * Get a lifecycle policy 
definition + */ + public void getLifecyclePolicy(GetLifecycleAction.Request request, ActionListener listener) { + client.execute(GetLifecycleAction.INSTANCE, request, listener); + } + + /** + * Get a lifecycle policy definition + */ + public ActionFuture getLifecyclePolicy(GetLifecycleAction.Request request) { + return client.execute(GetLifecycleAction.INSTANCE, request); + } + + /** + * Delete a lifecycle policy definition + */ + public void deleteLifecyclePolicy(DeleteLifecycleAction.Request request, ActionListener listener) { + client.execute(DeleteLifecycleAction.INSTANCE, request, listener); + } + + /** + * Delete a lifecycle policy definition + */ + public ActionFuture deleteLifecyclePolicy(DeleteLifecycleAction.Request request) { + return client.execute(DeleteLifecycleAction.INSTANCE, request); + } + + /** + * Explain the current lifecycle state for an index + */ + public void explainLifecycle(ExplainLifecycleRequest request, ActionListener listener) { + client.execute(ExplainLifecycleAction.INSTANCE, request, listener); + } + + /** + * Explain the current lifecycle state for an index + */ + public ActionFuture explainLifecycle(ExplainLifecycleRequest request) { + return client.execute(ExplainLifecycleAction.INSTANCE, request); + } + + /** + * Returns the current status of the ILM plugin + */ + public void getStatus(GetStatusAction.Request request, ActionListener listener) { + client.execute(GetStatusAction.INSTANCE, request, listener); + } + + /** + * Returns the current status of the ILM plugin + */ + public ActionFuture getStatus(GetStatusAction.Request request) { + return client.execute(GetStatusAction.INSTANCE, request); + } + + /** + * Removes index lifecycle management from an index + */ + public void removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyAction.Request request, + ActionListener listener) { + client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, request, listener); + } + + /** + * Removes index lifecycle management from an index + 
*/ + public ActionFuture removeIndexLifecyclePolicy( + RemoveIndexLifecyclePolicyAction.Request request) { + return client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, request); + } + + /** + * Retries the policy for an index which is currently in ERROR + */ + public void retryPolicy(RetryAction.Request request, ActionListener listener) { + client.execute(RetryAction.INSTANCE, request, listener); + } + + /** + * Removes index lifecycle management from an index + */ + public ActionFuture retryPolicy(RetryAction.Request request) { + return client.execute(RetryAction.INSTANCE, request); + } + + /** + * Starts the ILM plugin + */ + public void startILM(StartILMRequest request, ActionListener listener) { + client.execute(StartILMAction.INSTANCE, request, listener); + } + + /** + * Starts the ILM plugin + */ + public ActionFuture startILM(StartILMRequest request) { + return client.execute(StartILMAction.INSTANCE, request); + } + + /** + * Stops the ILM plugin + */ + public void stopILM(StopILMRequest request, ActionListener listener) { + client.execute(StopILMAction.INSTANCE, request, listener); + } + + /** + * Stops the ILM plugin + */ + public ActionFuture stopILM(StopILMRequest request) { + return client.execute(StopILMAction.INSTANCE, request); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java new file mode 100644 index 0000000000000..bed04a7cf5425 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.List; +import java.util.stream.Collectors; + +public abstract class AbstractActionTestCase extends AbstractSerializingTestCase { + + public abstract void testToSteps(); + + protected boolean isSafeAction() { + return true; + } + + public final void testIsSafeAction() { + LifecycleAction action = createTestInstance(); + assertEquals(isSafeAction(), action.isSafeAction()); + } + + public void testToStepKeys() { + T action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + List stepKeys = action.toStepKeys(phase); + assertNotNull(stepKeys); + List expectedStepKeys = steps.stream().map(Step::getKey).collect(Collectors.toList()); + assertEquals(expectedStepKeys, stepKeys); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java new file mode 100644 index 0000000000000..2757a0499aae9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public abstract class AbstractStepTestCase extends ESTestCase { + + protected static final int NUMBER_OF_TEST_RUNS = 20; + + protected abstract T createRandomInstance(); + protected abstract T mutateInstance(T instance); + protected abstract T copyInstance(T instance); + + public void testHashcodeAndEquals() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createRandomInstance(), this::copyInstance, this::mutateInstance); + } + } + + public static StepKey randomStepKey() { + String randomPhase = randomAlphaOfLength(10); + String randomAction = randomAlphaOfLength(10); + String randomStepName = randomAlphaOfLength(10); + return new StepKey(randomPhase, randomAction, randomStepName); + } + + public void testStepNameNotError() { + T instance = createRandomInstance(); + StepKey stepKey = instance.getKey(); + assertFalse(ErrorStep.NAME.equals(stepKey.getName())); + StepKey nextStepKey = instance.getKey(); + assertFalse(ErrorStep.NAME.equals(nextStepKey.getName())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java new file mode 100644 index 0000000000000..dfe2afc5d19ef --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class AllocateActionTests extends AbstractActionTestCase { + + @Override + protected AllocateAction doParseInstance(XContentParser parser) { + return AllocateAction.parse(parser); + } + + @Override + protected AllocateAction createTestInstance() { + return randomInstance(); + } + + static AllocateAction randomInstance() { + boolean hasAtLeastOneMap = false; + Map includes; + if (randomBoolean()) { + includes = randomMap(1, 100); + hasAtLeastOneMap = true; + } else { + includes = randomBoolean() ? null : Collections.emptyMap(); + } + Map excludes; + if (randomBoolean()) { + hasAtLeastOneMap = true; + excludes = randomMap(1, 100); + } else { + excludes = randomBoolean() ? null : Collections.emptyMap(); + } + Map requires; + if (hasAtLeastOneMap == false || randomBoolean()) { + requires = randomMap(1, 100); + } else { + requires = randomBoolean() ? null : Collections.emptyMap(); + } + Integer numberOfReplicas = randomBoolean() ? 
null : randomIntBetween(0, 10); + return new AllocateAction(numberOfReplicas, includes, excludes, requires); + } + + + @Override + protected Reader instanceReader() { + return AllocateAction::new; + } + + @Override + protected AllocateAction mutateInstance(AllocateAction instance) { + Map include = instance.getInclude(); + Map exclude = instance.getExclude(); + Map require = instance.getRequire(); + Integer numberOfReplicas = instance.getNumberOfReplicas(); + switch (randomIntBetween(0, 3)) { + case 0: + include = new HashMap<>(include); + include.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 1: + exclude = new HashMap<>(exclude); + exclude.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 2: + require = new HashMap<>(require); + require.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 3: + numberOfReplicas = randomIntBetween(11, 20); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new AllocateAction(numberOfReplicas, include, exclude, require); + } + + public void testAllMapsNullOrEmpty() { + Map include = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(null, include, exclude, require)); + assertEquals("At least one of " + AllocateAction.INCLUDE_FIELD.getPreferredName() + ", " + + AllocateAction.EXCLUDE_FIELD.getPreferredName() + " or " + AllocateAction.REQUIRE_FIELD.getPreferredName() + + "must contain attributes for action " + AllocateAction.NAME, exception.getMessage()); + } + + public void testInvalidNumberOfReplicas() { + Map include = randomMap(1, 5); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? 
null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(randomIntBetween(-1000, -1), include, exclude, require)); + assertEquals("[" + AllocateAction.NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0", exception.getMessage()); + } + + public static Map randomMap(int minEntries, int maxEntries) { + Map map = new HashMap<>(); + int numIncludes = randomIntBetween(minEntries, maxEntries); + for (int i = 0; i < numIncludes; i++) { + map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + return map; + } + + public void testToSteps() { + AllocateAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(2, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, AllocateAction.NAME, AllocateAction.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, AllocateAction.NAME, AllocationRoutedStep.NAME); + UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, firstStep.getNextStepKey()); + Settings.Builder expectedSettings = Settings.builder(); + if (action.getNumberOfReplicas() != null) { + expectedSettings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, action.getNumberOfReplicas()); + } + action.getInclude().forEach( + (key, value) -> expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + key, value)); + action.getExclude().forEach( + (key, value) -> expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + key, value)); + action.getRequire().forEach( + (key, value) -> 
expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + key, value)); + assertThat(firstStep.getSettings(), equalTo(expectedSettings.build())); + AllocationRoutedStep secondStep = (AllocationRoutedStep) steps.get(1); + assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(nextStepKey, secondStep.getNextStepKey()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java new file mode 100644 index 0000000000000..80eb89c45b952 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.AllocationRoutedStep.Info; + +import java.io.IOException; + +public class AllocationRoutedStepInfoTests extends AbstractXContentTestCase { + + @Override + protected Info createTestInstance() { + return new Info(randomNonNegativeLong(), randomNonNegativeLong(), randomBoolean()); + } + + @Override + protected Info doParseInstance(XContentParser parser) throws IOException { + return Info.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public final void testEqualsAndHashcode() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance); + } + } + + protected final Info copyInstance(Info instance) throws IOException { + return new Info(instance.getActualReplicas(), instance.getNumberShardsLeftToAllocate(), instance.allShardsActive()); + } + + protected Info mutateInstance(Info instance) throws IOException { + long actualReplicas = instance.getActualReplicas(); + long shardsToAllocate = instance.getNumberShardsLeftToAllocate(); + boolean allShardsActive = instance.allShardsActive(); + switch (between(0, 2)) { + case 0: + shardsToAllocate += between(1, 20); + break; + case 1: + allShardsActive = allShardsActive == false; + break; + case 2: + actualReplicas += between(1, 20); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new Info(actualReplicas, shardsToAllocate, allShardsActive); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java new file mode 100644 index 0000000000000..bdbd129993bed --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java @@ -0,0 +1,314 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.Reason; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Collections; +import java.util.Map; + +public class AllocationRoutedStepTests extends AbstractStepTestCase { + + @Override + public AllocationRoutedStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); 
+ boolean waitOnAllShardCopies = randomBoolean(); + + return new AllocationRoutedStep(stepKey, nextStepKey, waitOnAllShardCopies); + } + + @Override + public AllocationRoutedStep mutateInstance(AllocationRoutedStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + boolean waitOnAllShardCopies = instance.getWaitOnAllShardCopies(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + waitOnAllShardCopies = waitOnAllShardCopies == false; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new AllocationRoutedStep(key, nextKey, waitOnAllShardCopies); + } + + @Override + public AllocationRoutedStep copyInstance(AllocationRoutedStep instance) { + return new AllocationRoutedStep(instance.getKey(), instance.getNextStepKey(), instance.getWaitOnAllShardCopies()); + } + + public void testConditionMet() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + 
excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + } + + public void testConditionMetOnlyOneCopyAllocated() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + 
expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey(), false); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + } + + public void testExecuteAllocateNotComplete() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + 
existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), "node2", true, ShardRoutingState.STARTED)); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, 1, true))); + } + + public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + 
excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey(), true); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, 1, true))); + } + + public void testExecuteAllocateUnassigned() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + 
expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), null, null, true, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(randomFrom(Reason.values()), "the shard is intentionally unassigned"))); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, -1, false))); + } + + /** + * this tests the scenario where + * + * PUT index + * { + * "settings": { + * "number_of_replicas": 0, + * "number_of_shards": 1 + * } + * } + * + * PUT index/_settings + * { + * "number_of_replicas": 1, + * "index.routing.allocation.include._name": "{node-name}" + * } + */ + public void testExecuteReplicasNotAllocatedOnSingleNode() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = Collections.singletonMap("_name", "node1"); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + 
Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), null, null, false, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(Reason.REPLICA_ADDED, "no attempt"))); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 1, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(1, -1, false))); + } + + public void testExecuteIndexMissing() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).build(); + + AllocationRoutedStep step = createRandomInstance(); + + Result actualResult = step.isConditionMet(index, clusterState); + assertFalse(actualResult.isComplete()); + assertNull(actualResult.getInfomationContext()); + } + + private void assertAllocateStatus(Index index, int shards, int replicas, AllocationRoutedStep step, Settings.Builder existingSettings, + Settings.Builder node1Settings, Settings.Builder node2Settings, IndexRoutingTable.Builder indexRoutingTable, + ClusterStateWaitStep.Result expectedResult) { + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()).settings(existingSettings).numberOfShards(shards) + .numberOfReplicas(replicas).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder().fPut(index.getName(), + indexMetadata); + + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(node1Settings.build(), new TransportAddress(TransportAddress.META_ADDRESS, 9200), + "node1")) + .add(DiscoveryNode.createLocal(node2Settings.build(), new TransportAddress(TransportAddress.META_ADDRESS, 9201), + "node2"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + Result actualResult = step.isConditionMet(index, clusterState); + assertEquals(expectedResult.isComplete(), actualResult.isComplete()); + assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java new file mode 100644 index 0000000000000..40dd022c05de6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.util.Map;

import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionStateTests.createCustomMetadata;

/**
 * Tests for {@link CopyExecutionStateStep}, which copies the ILM execution state
 * (phase, action, lifecycle date) from an original index onto its shrunken
 * counterpart and points the copy at the shrunken-index-check step.
 */
public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecutionStateStep> {
    @Override
    protected CopyExecutionStateStep createRandomInstance() {
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        String shrunkIndexPrefix = randomAlphaOfLength(10);
        return new CopyExecutionStateStep(stepKey, nextStepKey, shrunkIndexPrefix);
    }

    @Override
    protected CopyExecutionStateStep mutateInstance(CopyExecutionStateStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();
        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();

        // Mutate exactly one of the three fields so equals/hashCode tests see a distinct instance.
        switch (between(0, 2)) {
            case 0:
                key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 1:
                nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 2:
                shrunkIndexPrefix += randomAlphaOfLength(5);
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }

        return new CopyExecutionStateStep(key, nextKey, shrunkIndexPrefix);
    }

    @Override
    protected CopyExecutionStateStep copyInstance(CopyExecutionStateStep instance) {
        return new CopyExecutionStateStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix());
    }

    public void testPerformAction() {
        CopyExecutionStateStep step = createRandomInstance();
        String indexName = randomAlphaOfLengthBetween(5, 20);
        Map<String, String> customMetadata = createCustomMetadata();

        // Original index carries ILM custom metadata; the shrunken index starts without any.
        IndexMetaData originalIndexMetaData = IndexMetaData.builder(indexName)
            .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(1, 5))
            .putCustom(ILM_CUSTOM_METADATA_KEY, customMetadata)
            .build();
        IndexMetaData shrunkIndexMetaData = IndexMetaData.builder(step.getShrunkIndexPrefix() + indexName)
            .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(1, 5))
            .build();
        ClusterState originalClusterState = ClusterState.builder(ClusterName.DEFAULT)
            .metaData(MetaData.builder()
                .put(originalIndexMetaData, false)
                .put(shrunkIndexMetaData, false))
            .build();

        ClusterState newClusterState = step.performAction(originalIndexMetaData.getIndex(), originalClusterState);

        LifecycleExecutionState oldIndexData = LifecycleExecutionState.fromIndexMetadata(originalIndexMetaData);
        LifecycleExecutionState newIndexData = LifecycleExecutionState
            .fromIndexMetadata(newClusterState.metaData().index(step.getShrunkIndexPrefix() + indexName));

        // Lifecycle date, phase and action are copied verbatim; the step name is
        // reset to the shrunken-index check so ILM resumes from the right place.
        assertEquals(oldIndexData.getLifecycleDate(), newIndexData.getLifecycleDate());
        assertEquals(oldIndexData.getPhase(), newIndexData.getPhase());
        assertEquals(oldIndexData.getAction(), newIndexData.getAction());
        assertEquals(ShrunkenIndexCheckStep.NAME, newIndexData.getStep());
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.List;

/**
 * Serialization and parsing tests for {@link DeleteAction}, plus verification of
 * its conversion into the single concrete {@link DeleteStep}.
 */
public class DeleteActionTests extends AbstractActionTestCase<DeleteAction> {

    @Override
    protected DeleteAction doParseInstance(XContentParser parser) throws IOException {
        return DeleteAction.parse(parser);
    }

    @Override
    protected DeleteAction createTestInstance() {
        // DeleteAction carries no configuration, so there is only one possible instance.
        return new DeleteAction();
    }

    @Override
    protected Reader<DeleteAction> instanceReader() {
        return DeleteAction::new;
    }

    public void testToSteps() {
        DeleteAction action = createTestInstance();
        String phase = randomAlphaOfLengthBetween(1, 10);
        StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
                randomAlphaOfLengthBetween(1, 10));
        List<Step> steps = action.toSteps(null, phase, nextStepKey);
        assertNotNull(steps);
        // The delete action expands to exactly one step: the delete itself,
        // keyed under this phase and chained to the caller-supplied next key.
        assertEquals(1, steps.size());
        StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME);
        DeleteStep firstStep = (DeleteStep) steps.get(0);
        assertEquals(expectedFirstStepKey, firstStep.getKey());
        assertEquals(nextStepKey, firstStep.getNextStepKey());
    }
}
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.hamcrest.Matchers.equalTo; + +public class DeleteStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public DeleteStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + return new DeleteStep(stepKey, nextStepKey, client); + } + + @Override + public DeleteStep mutateInstance(DeleteStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new DeleteStep(key, nextKey, instance.getClient()); + } + + @Override + public DeleteStep copyInstance(DeleteStep instance) { + return new DeleteStep(instance.getKey(), 
instance.getNextStepKey(), instance.getClient()); + } + + public void testIndexSurvives() { + assertFalse(createRandomInstance().indexSurvives()); + } + + public void testDeleted() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(invocation -> { + DeleteIndexRequest request = (DeleteIndexRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(indexMetaData.getIndex().getName(), request.indices()[0]); + listener.onResponse(null); + return null; + }).when(indicesClient).delete(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + + DeleteStep step = createRandomInstance(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }); + + assertThat(actionCompleted.get(), equalTo(true)); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).delete(Mockito.any(), Mockito.any()); + } + + public void testExceptionThrown() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception 
exception = new RuntimeException(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + DeleteIndexRequest request = (DeleteIndexRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(indexMetaData.getIndex().getName(), request.indices()[0]); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).delete(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + DeleteStep step = createRandomInstance(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertEquals(exception, e); + exceptionThrown.set(true); + } + }); + + assertThat(exceptionThrown.get(), equalTo(true)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java new file mode 100644 index 0000000000000..5bd0cdf230da5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class ErrorStepTests extends AbstractStepTestCase { + + @Override + public ErrorStep createRandomInstance() { + StepKey stepKey = new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), ErrorStep.NAME); + return new ErrorStep(stepKey); + } + + @Override + public ErrorStep mutateInstance(ErrorStep instance) { + StepKey key = instance.getKey(); + assertSame(instance.getNextStepKey(), instance.getKey()); + + key = new StepKey(key.getPhase(), key.getAction() + randomAlphaOfLength(5), key.getName()); + + return new ErrorStep(key); + } + + @Override + public ErrorStep copyInstance(ErrorStep instance) { + assertSame(instance.getNextStepKey(), instance.getKey()); + return new ErrorStep(instance.getKey()); + } + + public void testInvalidStepKey() { + StepKey invalidKey = randomStepKey(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new ErrorStep(invalidKey)); + assertEquals("An error step must have a step key whose step name is " + ErrorStep.NAME, exception.getMessage()); + } + + @Override + public void testStepNameNotError() { + // Need to override this test because this is the one special step that + // is allowed to have ERROR as the step name + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java new file mode 100644 index 0000000000000..13ada1d6d39a5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;

import java.io.IOException;
import java.util.Arrays;

/**
 * Wire-serialization round-trip tests for {@link ExplainLifecycleRequest},
 * covering its two fields: the target indices and the indices options.
 */
public class ExplainLifecycleRequestTests extends AbstractWireSerializingTestCase<ExplainLifecycleRequest> {

    @Override
    protected ExplainLifecycleRequest createTestInstance() {
        ExplainLifecycleRequest request = new ExplainLifecycleRequest();
        if (randomBoolean()) {
            request.indices(generateRandomStringArray(20, 20, false, true));
        }
        if (randomBoolean()) {
            IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(),
                    randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
            request.indicesOptions(indicesOptions);
        }
        return request;
    }

    @Override
    protected ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) throws IOException {
        String[] indices = instance.indices();
        IndicesOptions indicesOptions = instance.indicesOptions();
        // Change exactly one field, guaranteeing a value different from the original.
        switch (between(0, 1)) {
            case 0:
                indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()),
                        () -> generateRandomStringArray(20, 10, false, true));
                break;
            case 1:
                indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(),
                        randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest();
        newRequest.indices(indices);
        newRequest.indicesOptions(indicesOptions);
        return newRequest;
    }

    @Override
    protected Reader<ExplainLifecycleRequest> instanceReader() {
        return ExplainLifecycleRequest::new;
    }

}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Streaming and XContent round-trip tests for {@link ExplainLifecycleResponse},
 * which maps index names to per-index {@link IndexLifecycleExplainResponse}s.
 */
public class ExplainLifecycleResponseTests extends AbstractStreamableXContentTestCase<ExplainLifecycleResponse> {

    @Override
    protected ExplainLifecycleResponse createTestInstance() {
        Map<String, IndexLifecycleExplainResponse> indexResponses = new HashMap<>();
        for (int i = 0; i < randomIntBetween(0, 2); i++) {
            IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse();
            indexResponses.put(indexResponse.getIndex(), indexResponse);
        }
        return new ExplainLifecycleResponse(indexResponses);
    }

    @Override
    protected ExplainLifecycleResponse createBlankInstance() {
        return new ExplainLifecycleResponse();
    }

    @Override
    protected ExplainLifecycleResponse mutateInstance(ExplainLifecycleResponse response) {
        Map<String, IndexLifecycleExplainResponse> indexResponses = new HashMap<>(response.getIndexResponses());
        // Adding one more per-index entry always yields an unequal instance.
        IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse();
        indexResponses.put(indexResponse.getIndex(), indexResponse);
        return new ExplainLifecycleResponse(indexResponses);
    }

    @Override
    protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException {
        return ExplainLifecycleResponse.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        // The response parser is strict; unknown fields are rejected.
        return false;
    }

    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        // MockAction must be registered so randomly generated responses can be deserialized.
        return new NamedWriteableRegistry(Arrays
                .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)));
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse));
        return new NamedXContentRegistry(entries);
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;

/**
 * Tests for {@link ForceMergeAction}: parsing, validation of
 * {@code max_num_segments}, and conversion into its three concrete steps.
 */
public class ForceMergeActionTests extends AbstractActionTestCase<ForceMergeAction> {

    @Override
    protected ForceMergeAction doParseInstance(XContentParser parser) {
        return ForceMergeAction.parse(parser);
    }

    @Override
    protected ForceMergeAction createTestInstance() {
        return randomInstance();
    }

    static ForceMergeAction randomInstance() {
        return new ForceMergeAction(randomIntBetween(1, 100));
    }

    @Override
    protected ForceMergeAction mutateInstance(ForceMergeAction instance) {
        // Bumping max_num_segments always produces a distinct, still-valid action.
        int maxNumSegments = instance.getMaxNumSegments();
        maxNumSegments = maxNumSegments + randomIntBetween(1, 10);
        return new ForceMergeAction(maxNumSegments);
    }

    @Override
    protected Reader<ForceMergeAction> instanceReader() {
        return ForceMergeAction::new;
    }

    public void testMissingMaxNumSegments() throws IOException {
        // max_num_segments is mandatory; an empty object must fail to parse.
        BytesReference emptyObject = BytesReference.bytes(JsonXContent.contentBuilder().startObject().endObject());
        XContentParser parser = XContentHelper.createParser(null, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                emptyObject, XContentType.JSON);
        Exception e = expectThrows(IllegalArgumentException.class, () -> ForceMergeAction.parse(parser));
        assertThat(e.getMessage(), equalTo("Required [max_num_segments]"));
    }

    public void testInvalidNegativeSegmentNumber() {
        Exception r = expectThrows(IllegalArgumentException.class, () -> new ForceMergeAction(randomIntBetween(-10, 0)));
        assertThat(r.getMessage(), equalTo("[max_num_segments] must be a positive integer"));
    }

    public void testToSteps() {
        ForceMergeAction instance = createTestInstance();
        String phase = randomAlphaOfLength(5);
        StepKey nextStepKey = new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10));
        List<Step> steps = instance.toSteps(null, phase, nextStepKey);
        assertNotNull(steps);
        assertEquals(3, steps.size());
        // Expected pipeline: mark the index read-only -> force-merge -> verify segment count.
        UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0);
        ForceMergeStep secondStep = (ForceMergeStep) steps.get(1);
        SegmentCountStep thirdStep = (SegmentCountStep) steps.get(2);
        assertThat(firstStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ReadOnlyAction.NAME)));
        assertThat(firstStep.getNextStepKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ForceMergeStep.NAME)));
        assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(firstStep.getSettings()));
        assertThat(secondStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ForceMergeStep.NAME)));
        assertThat(secondStep.getNextStepKey(), equalTo(thirdStep.getKey()));
        assertThat(thirdStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, SegmentCountStep.NAME)));
        assertThat(thirdStep.getNextStepKey(), equalTo(nextStepKey));
    }
}
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.mockito.Mockito; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ForceMergeStepTests extends AbstractStepTestCase { + + @Override + public ForceMergeStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + + return new ForceMergeStep(stepKey, nextStepKey, null, maxNumSegments); + } + + @Override + public ForceMergeStep mutateInstance(ForceMergeStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + int maxNumSegments = instance.getMaxNumSegments(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxNumSegments += 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new ForceMergeStep(key, nextKey, instance.getClient(), maxNumSegments); + } + + @Override + 
public ForceMergeStep copyInstance(ForceMergeStep instance) { + return new ForceMergeStep(instance.getKey(), instance.getNextStepKey(), + instance.getClient(), instance.getMaxNumSegments()); + } + + public void testPerformActionComplete() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + Client client = mock(Client.class); + AdminClient adminClient = mock(AdminClient.class); + IndicesAdminClient indicesClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesClient); + ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.doAnswer(invocationOnMock -> { + ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; + assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(forceMergeResponse); + return null; + }).when(indicesClient).forceMerge(any(), any()); + + ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments); + SetOnce completed = new SetOnce<>(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + completed.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("unexpected method call", e); + } + }); + assertThat(completed.get(), equalTo(true)); + } + + public void testPerformActionThrowsException() { + IndexMetaData indexMetaData = 
IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException("error"); + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + Client client = mock(Client.class); + AdminClient adminClient = mock(AdminClient.class); + IndicesAdminClient indicesClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesClient); + ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.doAnswer(invocationOnMock -> { + ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; + assertThat(request.indices().length, equalTo(1)); + assertThat(request.indices()[0], equalTo(indexMetaData.getIndex().getName())); + assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onFailure(exception); + return null; + }).when(indicesClient).forceMerge(any(), any()); + + ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments); + SetOnce exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + throw new AssertionError("unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertEquals(exception, e); + exceptionThrown.set(true); + } + }); + assertThat(exceptionThrown.get(), equalTo(true)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java new file mode 100644 index 0000000000000..90915d66e5d47 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java @@ -0,0 +1,204 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class IndexExplainResponseTests extends AbstractSerializingTestCase { + + static IndexLifecycleExplainResponse randomIndexExplainResponse() { + if (frequently()) { + return randomManagedIndexExplainResponse(); + } else { + return randomUnmanagedIndexExplainResponse(); + } + } + + private static IndexLifecycleExplainResponse randomUnmanagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(randomAlphaOfLength(10)); + } + + private static IndexLifecycleExplainResponse 
randomManagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newManagedIndexResponse(randomAlphaOfLength(10), randomAlphaOfLength(10), + randomNonNegativeLong(), randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), + randomBoolean() ? null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + } + + @Override + protected IndexLifecycleExplainResponse createTestInstance() { + return randomIndexExplainResponse(); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleExplainResponse::new; + } + + @Override + protected IndexLifecycleExplainResponse doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleExplainResponse.PARSER.apply(parser, null); + } + + @Override + protected IndexLifecycleExplainResponse mutateInstance(IndexLifecycleExplainResponse instance) throws IOException { + String index = instance.getIndex(); + String policy = instance.getPolicyName(); + String phase = instance.getPhase(); + String action = instance.getAction(); + String step = instance.getStep(); + String failedStep = instance.getFailedStep(); + Long policyTime = instance.getLifecycleDate(); + Long phaseTime = instance.getPhaseTime(); + Long actionTime = instance.getActionTime(); + Long stepTime = instance.getStepTime(); + boolean managed = instance.managedByILM(); + BytesReference stepInfo = instance.getStepInfo(); + PhaseExecutionInfo phaseExecutionInfo = instance.getPhaseExecutionInfo(); + if (managed) { + switch (between(0, 12)) { + case 0: + index = index + randomAlphaOfLengthBetween(1, 5); + break; + case 1: + policy = policy + randomAlphaOfLengthBetween(1, 5); + break; + case 2: + phase = phase + randomAlphaOfLengthBetween(1, 5); + break; + case 3: + action = action 
+ randomAlphaOfLengthBetween(1, 5); + break; + case 4: + step = step + randomAlphaOfLengthBetween(1, 5); + break; + case 5: + if (Strings.hasLength(failedStep) == false) { + failedStep = randomAlphaOfLength(10); + } else if (randomBoolean()) { + failedStep = failedStep + randomAlphaOfLengthBetween(1, 5); + } else { + failedStep = null; + } + break; + case 6: + policyTime += randomLongBetween(0, 100000); + break; + case 7: + phaseTime += randomLongBetween(0, 100000); + break; + case 8: + actionTime += randomLongBetween(0, 100000); + break; + case 9: + stepTime += randomLongBetween(0, 100000); + break; + case 10: + if (Strings.hasLength(stepInfo) == false) { + stepInfo = new BytesArray(randomByteArrayOfLength(100)); + } else if (randomBoolean()) { + stepInfo = randomValueOtherThan(stepInfo, + () -> new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString())); + } else { + stepInfo = null; + } + break; + case 11: + phaseExecutionInfo = randomValueOtherThan(phaseExecutionInfo, () -> PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + break; + case 12: + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); + default: + throw new AssertionError("Illegal randomisation branch"); + } + return IndexLifecycleExplainResponse.newManagedIndexResponse(index, policy, policyTime, phase, action, step, failedStep, + phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo); + } else { + switch (between(0, 1)) { + case 0: + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index + randomAlphaOfLengthBetween(1, 5)); + case 1: + return randomManagedIndexExplainResponse(); + default: + throw new AssertionError("Illegal randomisation branch"); + } + } + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Arrays + .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); + } + + @Override + protected NamedXContentRegistry 
xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)); + return new NamedXContentRegistry(entries); + } + + private static class RandomStepInfo implements ToXContentObject { + + private final String key; + private final String value; + + RandomStepInfo(Supplier randomStringSupplier) { + this.key = randomStringSupplier.get(); + this.value = randomStringSupplier.get(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(key, value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RandomStepInfo other = (RandomStepInfo) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java new file mode 100644 index 0000000000000..1035eb7a7462c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class IndexLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected IndexLifecycleFeatureSetUsage createTestInstance() { + boolean enabled = randomBoolean(); + boolean available = randomBoolean(); + List policyStats = null; + if (enabled) { + int size = randomIntBetween(0, 10); + policyStats = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + policyStats.add(PolicyStatsTests.createRandomInstance()); + } + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected IndexLifecycleFeatureSetUsage mutateInstance(IndexLifecycleFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + List policyStats = instance.getPolicyStats(); + switch (between(0, 2)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + case 2: + if (policyStats == null) { + policyStats = new ArrayList<>(); + policyStats.add(PolicyStatsTests.createRandomInstance()); + } else if (randomBoolean()) { + policyStats = null; + } else { + policyStats = new ArrayList<>(policyStats); + policyStats.add(PolicyStatsTests.createRandomInstance()); + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleFeatureSetUsage::new; + } + +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java new file mode 100644 index 0000000000000..5dbeee07fe75f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; + +public class InitializePolicyContextStepTests extends AbstractStepTestCase { + + @Override + public InitializePolicyContextStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + return new InitializePolicyContextStep(stepKey, nextStepKey); + } + + @Override + public InitializePolicyContextStep mutateInstance(InitializePolicyContextStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; 
+ default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new InitializePolicyContextStep(key, nextKey); + } + + @Override + public InitializePolicyContextStep copyInstance(InitializePolicyContextStep instance) { + return new InitializePolicyContextStep(instance.getKey(), instance.getNextStepKey()); + } + + public void testAddCreationDate() { + long creationDate = randomNonNegativeLong(); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT)) + .creationDate(creationDate) + .numberOfShards(1).numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + Index index = indexMetadata.getIndex(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + InitializePolicyContextStep step = new InitializePolicyContextStep(null, null); + ClusterState newState = step.performAction(index, clusterState); + assertThat(getIndexLifecycleDate(index, newState), equalTo(creationDate)); + } + + public void testDoNothing() { + long creationDate = randomNonNegativeLong(); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setIndexCreationDate(creationDate); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .creationDate(creationDate) + .numberOfShards(1).numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + Index index = indexMetadata.getIndex(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + InitializePolicyContextStep step = new 
InitializePolicyContextStep(null, null); + ClusterState newState = step.performAction(index, clusterState); + assertTrue(newState == clusterState); + } + + private long getIndexLifecycleDate(Index index, ClusterState clusterState) { + return LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index)).getLifecycleDate(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java new file mode 100644 index 0000000000000..e8276ad06ead2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.HashMap; +import java.util.Map; + +public class LifecycleExecutionStateTests extends ESTestCase { + + public void testConversion() { + Map customMetadata = createCustomMetadata(); + LifecycleExecutionState parsed = LifecycleExecutionState.fromCustomMetadata(customMetadata); + assertEquals(customMetadata, parsed.asMap()); + } + + public void testEmptyValuesAreNotSerialized() { + LifecycleExecutionState empty = LifecycleExecutionState.builder().build(); + assertEquals(new HashMap().entrySet(), empty.asMap().entrySet()); + + Map originalMap = createCustomMetadata(); + LifecycleExecutionState originalState = LifecycleExecutionState.fromCustomMetadata(originalMap); + LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(originalState); + newState.setPhase(null); + assertFalse(newState.build().asMap().containsKey("phase")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setAction(null); + assertFalse(newState.build().asMap().containsKey("action")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setStep(null); + assertFalse(newState.build().asMap().containsKey("step")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setFailedStep(null); + assertFalse(newState.build().asMap().containsKey("failed_step")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setPhaseDefinition(null); + assertFalse(newState.build().asMap().containsKey("phase_definition")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setStepInfo(null); + assertFalse(newState.build().asMap().containsKey("step_info")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setPhaseTime(null); + assertFalse(newState.build().asMap().containsKey("phase_time")); + + 
newState = LifecycleExecutionState.builder(originalState); + newState.setActionTime(null); + assertFalse(newState.build().asMap().containsKey("action_time")); + + newState = LifecycleExecutionState.builder(originalState); + newState.setIndexCreationDate(null); + assertFalse(newState.build().asMap().containsKey("creation_date")); + } + + public void testEqualsAndHashcode() { + LifecycleExecutionState original = LifecycleExecutionState.fromCustomMetadata(createCustomMetadata()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + original, + toCopy -> LifecycleExecutionState.builder(toCopy).build(), + LifecycleExecutionStateTests::mutate); + } + + private static LifecycleExecutionState mutate(LifecycleExecutionState toMutate) { + LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(toMutate); + boolean changed = false; + if (randomBoolean()) { + newState.setPhase(randomValueOtherThan(toMutate.getPhase(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setAction(randomValueOtherThan(toMutate.getAction(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setStep(randomValueOtherThan(toMutate.getStep(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setPhaseDefinition(randomValueOtherThan(toMutate.getPhaseDefinition(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setFailedStep(randomValueOtherThan(toMutate.getFailedStep(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setStepInfo(randomValueOtherThan(toMutate.getStepInfo(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setPhaseTime(randomValueOtherThan(toMutate.getPhaseTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + 
newState.setActionTime(randomValueOtherThan(toMutate.getActionTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + newState.setStepTime(randomValueOtherThan(toMutate.getStepTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + newState.setIndexCreationDate(randomValueOtherThan(toMutate.getLifecycleDate(), ESTestCase::randomLong)); + changed = true; + } + + if (changed == false) { + return LifecycleExecutionState.builder().build(); + } + + return newState.build(); + } + + static Map createCustomMetadata() { + String phase = randomAlphaOfLengthBetween(5,20); + String action = randomAlphaOfLengthBetween(5,20); + String step = randomAlphaOfLengthBetween(5,20); + String failedStep = randomAlphaOfLengthBetween(5,20); + String stepInfo = randomAlphaOfLengthBetween(15,50); + String phaseDefinition = randomAlphaOfLengthBetween(15,50); + long indexCreationDate = randomLong(); + long phaseTime = randomLong(); + long actionTime = randomLong(); + long stepTime = randomLong(); + + Map customMetadata = new HashMap<>(); + customMetadata.put("phase", phase); + customMetadata.put("action", action); + customMetadata.put("step", step); + customMetadata.put("failed_step", failedStep); + customMetadata.put("step_info", stepInfo); + customMetadata.put("phase_definition", phaseDefinition); + customMetadata.put("creation_date", String.valueOf(indexCreationDate)); + customMetadata.put("phase_time", String.valueOf(phaseTime)); + customMetadata.put("action_time", String.valueOf(actionTime)); + customMetadata.put("step_time", String.valueOf(stepTime)); + return customMetadata; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadataTests.java new file mode 100644 index 0000000000000..5cb75e132ce92 --- /dev/null +++ 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Wire- and XContent-serialization round-trip tests for {@link LifecyclePolicyMetadata},
 * the cluster-state wrapper that pairs a {@link LifecyclePolicy} with its security
 * headers, version and modification date.
 */
public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase<LifecyclePolicyMetadata> {

    // Policy name shared between createTestInstance() and doParseInstance().
    private String lifecycleName;

    @Before
    public void setup() {
        lifecycleName = randomAlphaOfLength(20);
    }

    @Override
    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        // Register every action (plus the test-only MockAction) so the embedded
        // policy can be deserialized from the stream.
        return new NamedWriteableRegistry(
            Arrays.asList(
                new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new),
                new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE,
                    (in) -> TimeseriesLifecycleType.INSTANCE),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new)
            ));
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.addAll(Arrays.asList(
            new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE),
                (p) -> TimeseriesLifecycleType.INSTANCE),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse)
        ));
        return new NamedXContentRegistry(entries);
    }

    @Override
    protected LifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException {
        return LifecyclePolicyMetadata.parse(parser, lifecycleName);
    }

    @Override
    protected LifecyclePolicyMetadata createTestInstance() {
        Map<String, String> headers = new HashMap<>();
        int numberHeaders = between(0, 10);
        for (int i = 0; i < numberHeaders; i++) {
            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
        }
        return new LifecyclePolicyMetadata(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName), headers,
            randomNonNegativeLong(), randomNonNegativeLong());
    }

    @Override
    protected Reader<LifecyclePolicyMetadata> instanceReader() {
        return LifecyclePolicyMetadata::new;
    }

    @Override
    protected LifecyclePolicyMetadata mutateInstance(LifecyclePolicyMetadata instance) throws IOException {
        LifecyclePolicy policy = instance.getPolicy();
        Map<String, String> headers = instance.getHeaders();
        long version = instance.getVersion();
        long creationDate = instance.getModifiedDate();
        // Mutate exactly one of the four components so equals/hashCode must notice it.
        switch (between(0, 3)) {
        case 0:
            policy = new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, policy.getName() + randomAlphaOfLengthBetween(1, 5),
                policy.getPhases());
            break;
        case 1:
            headers = new HashMap<>(headers);
            headers.put(randomAlphaOfLength(11), randomAlphaOfLength(11));
            break;
        case 2:
            version++;
            break;
        case 3:
            creationDate++;
            break;
        default:
            throw new AssertionError("Illegal randomisation branch");
        }
        return new LifecyclePolicyMetadata(policy, headers, version, creationDate);
    }

}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Mockito.mock;

/**
 * Serialization round-trip tests for {@link LifecyclePolicy}, plus unit tests for
 * {@link LifecyclePolicy#toSteps(Client)} and {@link LifecyclePolicy#isActionSafe(StepKey)}.
 * Also exposes the static factories used by other lifecycle tests to build random policies.
 */
public class LifecyclePolicyTests extends AbstractSerializingTestCase<LifecyclePolicy> {

    // Policy name shared between createTestInstance() and doParseInstance().
    private String lifecycleName;

    @Override
    protected LifecyclePolicy doParseInstance(XContentParser parser) {
        return LifecyclePolicy.parse(parser, lifecycleName);
    }

    @Override
    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        return new NamedWriteableRegistry(
            Arrays.asList(
                new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE,
                    (in) -> TimeseriesLifecycleType.INSTANCE),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new)
            ));
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.addAll(Arrays.asList(
            new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE),
                (p) -> TimeseriesLifecycleType.INSTANCE),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse)
        ));
        return new NamedXContentRegistry(entries);
    }

    @Override
    protected LifecyclePolicy createTestInstance() {
        lifecycleName = randomAlphaOfLength(5);
        return randomTimeseriesLifecyclePolicy(lifecycleName);
    }

    /** Maps a timeseries phase name to the set of action names valid in that phase. */
    private static Set<String> validActionsFor(String phase) {
        switch (phase) {
            case "hot":
                return TimeseriesLifecycleType.VALID_HOT_ACTIONS;
            case "warm":
                return TimeseriesLifecycleType.VALID_WARM_ACTIONS;
            case "cold":
                return TimeseriesLifecycleType.VALID_COLD_ACTIONS;
            case "delete":
                return TimeseriesLifecycleType.VALID_DELETE_ACTIONS;
            default:
                throw new IllegalArgumentException("invalid phase [" + phase + "]");
        }
    }

    /** Builds a (possibly randomized) concrete action instance for the given action name. */
    private static LifecycleAction randomActionFor(String action) {
        switch (action) {
            case AllocateAction.NAME:
                return AllocateActionTests.randomInstance();
            case DeleteAction.NAME:
                return new DeleteAction();
            case ForceMergeAction.NAME:
                return ForceMergeActionTests.randomInstance();
            case ReadOnlyAction.NAME:
                return new ReadOnlyAction();
            case RolloverAction.NAME:
                return RolloverActionTests.randomInstance();
            case ShrinkAction.NAME:
                return ShrinkActionTests.randomInstance();
            default:
                throw new IllegalArgumentException("invalid action [" + action + "]");
        }
    }

    /**
     * The same as {@link #randomTimeseriesLifecyclePolicy(String)} but ensures
     * that the resulting policy has all valid phases and all valid actions.
     */
    public static LifecyclePolicy randomTimeseriesLifecyclePolicyWithAllPhases(@Nullable String lifecycleName) {
        List<String> phaseNames = TimeseriesLifecycleType.VALID_PHASES;
        Map<String, Phase> phases = new HashMap<>(phaseNames.size());
        for (String phase : phaseNames) {
            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
            Map<String, LifecycleAction> actions = new HashMap<>();
            for (String action : validActionsFor(phase)) {
                actions.put(action, randomActionFor(action));
            }
            phases.put(phase, new Phase(phase, after, actions));
        }
        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, lifecycleName, phases);
    }

    /**
     * Builds a policy over a random subset of the valid timeseries phases, each with a
     * random subset of that phase's valid actions.
     */
    public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String lifecycleName) {
        List<String> phaseNames = randomSubsetOf(TimeseriesLifecycleType.VALID_PHASES);
        Map<String, Phase> phases = new HashMap<>(phaseNames.size());
        for (String phase : phaseNames) {
            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
            Map<String, LifecycleAction> actions = new HashMap<>();
            List<String> actionNames = randomSubsetOf(validActionsFor(phase));
            for (String action : actionNames) {
                actions.put(action, randomActionFor(action));
            }
            phases.put(phase, new Phase(phase, after, actions));
        }
        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, lifecycleName, phases);
    }

    /** Builds a policy with random phase names over the unconstrained {@link TestLifecycleType}. */
    public static LifecyclePolicy randomTestLifecyclePolicy(@Nullable String lifecycleName) {
        int numberPhases = randomInt(5);
        Map<String, Phase> phases = new HashMap<>(numberPhases);
        for (int i = 0; i < numberPhases; i++) {
            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
            Map<String, LifecycleAction> actions = new HashMap<>();
            if (randomBoolean()) {
                MockAction action = new MockAction();
                actions.put(action.getWriteableName(), action);
            }
            String phaseName = randomAlphaOfLength(10);
            phases.put(phaseName, new Phase(phaseName, after, actions));
        }
        return new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);
    }

    @Override
    protected LifecyclePolicy mutateInstance(LifecyclePolicy instance) throws IOException {
        String name = instance.getName();
        Map<String, Phase> phases = instance.getPhases();
        switch (between(0, 1)) {
        case 0:
            name = name + randomAlphaOfLengthBetween(1, 5);
            break;
        case 1:
            // Add a phase the instance does not already have.
            String phaseName = randomValueOtherThanMany(phases::containsKey, () -> randomFrom(TimeseriesLifecycleType.VALID_PHASES));
            phases = new LinkedHashMap<>(phases);
            phases.put(phaseName, new Phase(phaseName, TimeValue.timeValueSeconds(randomIntBetween(1, 1000)), Collections.emptyMap()));
            break;
        default:
            throw new AssertionError("Illegal randomisation branch");
        }
        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, name, phases);
    }

    @Override
    protected Reader<LifecyclePolicy> instanceReader() {
        return LifecyclePolicy::new;
    }

    public void testFirstAndLastSteps() {
        Client client = mock(Client.class);
        lifecycleName = randomAlphaOfLengthBetween(1, 20);
        Map<String, Phase> phases = new LinkedHashMap<>();
        LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);
        List<Step> steps = policy.toSteps(client);
        // An empty policy still produces the init step and the terminal step.
        assertThat(steps.size(), equalTo(2));
        assertThat(steps.get(0), instanceOf(InitializePolicyContextStep.class));
        assertThat(steps.get(0).getKey(), equalTo(new StepKey("new", "init", "init")));
        assertThat(steps.get(0).getNextStepKey(), equalTo(TerminalPolicyStep.KEY));
        assertSame(steps.get(1), TerminalPolicyStep.INSTANCE);
    }

    public void testToStepsWithOneStep() {
        Client client = mock(Client.class);
        MockStep mockStep = new MockStep(
            new Step.StepKey("test", "test", "test"), TerminalPolicyStep.KEY);

        lifecycleName = randomAlphaOfLengthBetween(1, 20);
        Map<String, Phase> phases = new LinkedHashMap<>();
        LifecycleAction firstAction = new MockAction(Arrays.asList(mockStep));
        Map<String, LifecycleAction> actions = Collections.singletonMap(MockAction.NAME, firstAction);
        Phase firstPhase = new Phase("test", TimeValue.ZERO, actions);
        phases.put(firstPhase.getName(), firstPhase);
        LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);
        StepKey firstStepKey = InitializePolicyContextStep.KEY;
        StepKey secondStepKey = new StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME);
        List<Step> steps = policy.toSteps(client);
        assertThat(steps.size(), equalTo(4));
        assertSame(steps.get(0).getKey(), firstStepKey);
        assertThat(steps.get(0).getNextStepKey(), equalTo(secondStepKey));
        assertThat(steps.get(1).getKey(), equalTo(secondStepKey));
        assertThat(steps.get(1).getNextStepKey(), equalTo(mockStep.getKey()));
        assertThat(steps.get(2).getKey(), equalTo(mockStep.getKey()));
        assertThat(steps.get(2).getNextStepKey(), equalTo(TerminalPolicyStep.KEY));
        assertSame(steps.get(3), TerminalPolicyStep.INSTANCE);
    }

    public void testToStepsWithTwoPhases() {
        Client client = mock(Client.class);
        // Expected step chain, built back-to-front so each step can reference its successor.
        MockStep secondActionStep = new MockStep(new StepKey("second_phase", "test2", "test"), TerminalPolicyStep.KEY);
        MockStep secondAfter = new MockStep(new StepKey("first_phase", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME),
            secondActionStep.getKey());
        MockStep firstActionAnotherStep = new MockStep(new StepKey("first_phase", "test", "bar"), secondAfter.getKey());
        MockStep firstActionStep = new MockStep(new StepKey("first_phase", "test", "foo"), firstActionAnotherStep.getKey());
        MockStep firstAfter = new MockStep(new StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME), firstActionStep.getKey());
        MockStep init = new MockStep(InitializePolicyContextStep.KEY, firstAfter.getKey());

        lifecycleName = randomAlphaOfLengthBetween(1, 20);
        Map<String, Phase> phases = new LinkedHashMap<>();
        LifecycleAction firstAction = new MockAction(Arrays.asList(firstActionStep, firstActionAnotherStep));
        LifecycleAction secondAction = new MockAction(Arrays.asList(secondActionStep));
        Map<String, LifecycleAction> firstActions = Collections.singletonMap(MockAction.NAME, firstAction);
        Map<String, LifecycleAction> secondActions = Collections.singletonMap(MockAction.NAME, secondAction);
        Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions);
        Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions);
        phases.put(firstPhase.getName(), firstPhase);
        phases.put(secondPhase.getName(), secondPhase);
        LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);

        List<Step> steps = policy.toSteps(client);
        assertThat(steps.size(), equalTo(7));
        assertThat(steps.get(0).getClass(), equalTo(InitializePolicyContextStep.class));
        assertThat(steps.get(0).getKey(), equalTo(init.getKey()));
        assertThat(steps.get(0).getNextStepKey(), equalTo(init.getNextStepKey()));
        assertThat(steps.get(1).getClass(), equalTo(PhaseCompleteStep.class));
        assertThat(steps.get(1).getKey(), equalTo(firstAfter.getKey()));
        assertThat(steps.get(1).getNextStepKey(), equalTo(firstAfter.getNextStepKey()));
        assertThat(steps.get(2), equalTo(firstActionStep));
        assertThat(steps.get(3), equalTo(firstActionAnotherStep));
        assertThat(steps.get(4).getClass(), equalTo(PhaseCompleteStep.class));
        assertThat(steps.get(4).getKey(), equalTo(secondAfter.getKey()));
        assertThat(steps.get(4).getNextStepKey(), equalTo(secondAfter.getNextStepKey()));
        assertThat(steps.get(5), equalTo(secondActionStep));
        assertSame(steps.get(6), TerminalPolicyStep.INSTANCE);
    }

    public void testIsActionSafe() {
        Map<String, Phase> phases = new LinkedHashMap<>();
        LifecycleAction firstAction = new MockAction(Collections.emptyList(), true);
        LifecycleAction secondAction = new MockAction(Collections.emptyList(), false);
        Map<String, LifecycleAction> firstActions = Collections.singletonMap(MockAction.NAME, firstAction);
        Map<String, LifecycleAction> secondActions = Collections.singletonMap(MockAction.NAME, secondAction);
        Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions);
        Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions);
        phases.put(firstPhase.getName(), firstPhase);
        phases.put(secondPhase.getName(), secondPhase);
        LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);

        assertTrue(policy.isActionSafe(new StepKey("first_phase", MockAction.NAME, randomAlphaOfLength(10))));

        assertFalse(policy.isActionSafe(new StepKey("second_phase", MockAction.NAME, randomAlphaOfLength(10))));

        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
            () -> policy.isActionSafe(new StepKey("non_existant_phase", MockAction.NAME, randomAlphaOfLength(10))));
        assertEquals("Phase [non_existant_phase] does not exist in policy [" + policy.getName() + "]", exception.getMessage());

        exception = expectThrows(IllegalArgumentException.class,
            () -> policy.isActionSafe(new StepKey("first_phase", "non_existant_action", randomAlphaOfLength(10))));
        assertEquals("Action [non_existant_action] in phase [first_phase] does not exist in policy [" + policy.getName() + "]",
            exception.getMessage());

        // Steps in the synthetic "new" phase are always considered safe.
        assertTrue(policy.isActionSafe(new StepKey("new", randomAlphaOfLength(10), randomAlphaOfLength(10))));
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.client.Client;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * Test-only {@link LifecycleAction} whose steps and safety flag are supplied
 * directly by the test, allowing lifecycle machinery to be exercised without
 * any real index operations.
 */
public class MockAction implements LifecycleAction {
    public static final String NAME = "TEST_ACTION";

    // No fields to parse: the parser only consumes the empty object emitted by toXContent().
    private static final ObjectParser<MockAction, Void> PARSER = new ObjectParser<>(NAME, MockAction::new);

    private final List<Step> steps;
    private final boolean safe;

    public static MockAction parse(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    /** Creates a safe action with no steps. */
    public MockAction() {
        this(Collections.emptyList());
    }

    /** Creates a safe action with the given steps. */
    public MockAction(List<Step> steps) {
        this(steps, true);
    }

    /**
     * @param steps the steps this action will report from {@link #toSteps}
     * @param safe  the value {@link #isSafeAction()} will return
     */
    public MockAction(List<Step> steps, boolean safe) {
        this.steps = steps;
        this.safe = safe;
    }

    public MockAction(StreamInput in) throws IOException {
        this.steps = in.readList(MockStep::new);
        this.safe = in.readBoolean();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // Serializes as an empty object; the steps are wire-only state.
        builder.startObject();
        builder.endObject();
        return builder;
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    public List<Step> getSteps() {
        return steps;
    }

    @Override
    public boolean isSafeAction() {
        return safe;
    }

    @Override
    public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
        return new ArrayList<>(steps);
    }

    @Override
    public List<StepKey> toStepKeys(String phase) {
        return steps.stream().map(Step::getKey).collect(Collectors.toList());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Steps are written (and read back) as MockStep so they stay wire-serializable.
        out.writeList(steps.stream().map(MockStep::new).collect(Collectors.toList()));
        out.writeBoolean(safe);
    }

    @Override
    public int hashCode() {
        return Objects.hash(steps, safe);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        MockAction other = (MockAction) obj;
        return Objects.equals(steps, other.steps) &&
                safe == other.safe;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Serialization and {@code toSteps} tests for the test-only {@link MockAction}.
 */
public class MockActionTests extends AbstractActionTestCase<MockAction> {

    @Override
    protected MockAction createTestInstance() {
        return new MockAction();
    }

    @Override
    protected MockAction doParseInstance(XContentParser parser) throws IOException {
        return MockAction.parse(parser);
    }

    @Override
    protected Reader<MockAction> instanceReader() {
        return MockAction::new;
    }

    @Override
    protected MockAction mutateInstance(MockAction instance) throws IOException {
        List<Step> steps = instance.getSteps();
        boolean safe = instance.isSafeAction();
        if (randomBoolean()) {
            // Mutate the step list: either drop the last step, replace it with a
            // two-step chain, or (if empty) add a first step.
            steps = new ArrayList<>(steps);
            if (steps.size() > 0) {
                Step lastStep = steps.remove(steps.size() - 1);
                if (randomBoolean()) {
                    Step.StepKey additionalStepKey = randomStepKey();
                    steps.add(new MockStep(lastStep.getKey(), additionalStepKey));
                    steps.add(new MockStep(additionalStepKey, null));
                }
            } else {
                steps.add(new MockStep(randomStepKey(), null));
            }
        } else {
            safe = safe == false;
        }
        return new MockAction(steps, safe);
    }

    private static Step.StepKey randomStepKey() {
        return new Step.StepKey(randomAlphaOfLength(5),
            randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    @Override
    public void testToSteps() {
        int numSteps = randomIntBetween(1, 10);
        List<Step> steps = new ArrayList<>(numSteps);
        for (int i = 0; i < numSteps; i++) {
            steps.add(new MockStep(randomStepKey(), randomStepKey()));
        }
        MockAction action = new MockAction(steps);
        // MockAction returns its configured steps verbatim, ignoring the arguments.
        assertEquals(action.getSteps(), action.toSteps(null, null, null));
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

/**
 * Minimal wire-serializable {@link Step} for tests: it carries only its own key
 * and an optional next-step key, with no behavior of its own.
 */
public class MockStep extends Step implements Writeable {
    public static final String NAME = "TEST_STEP";

    public MockStep(StepKey stepKey, Step.StepKey nextStepKey) {
        super(stepKey, nextStepKey);
    }

    /** Copy constructor used to re-wrap an arbitrary {@link Step} as a MockStep. */
    public MockStep(Step other) {
        super(other.getKey(), other.getNextStepKey());
    }

    /**
     * Wire format: the three key strings, then a presence flag followed by the
     * three next-key strings when a next step exists.
     */
    public MockStep(StreamInput in) throws IOException {
        // Argument evaluation is left-to-right, so the key is read before the flag.
        super(readStepKey(in), in.readBoolean() ? readStepKey(in) : null);
    }

    private static StepKey readStepKey(StreamInput in) throws IOException {
        return new StepKey(in.readString(), in.readString(), in.readString());
    }

    private static void writeStepKey(StreamOutput out, StepKey key) throws IOException {
        out.writeString(key.getPhase());
        out.writeString(key.getAction());
        out.writeString(key.getName());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        writeStepKey(out, getKey());
        StepKey next = getNextStepKey();
        out.writeBoolean(next != null);
        if (next != null) {
            writeStepKey(out, next);
        }
    }
}
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OperationModeTests.java new file mode 100644 index 0000000000000..d99868fe178a7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OperationModeTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class OperationModeTests extends ESTestCase { + + public void testIsValidChange() { + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.RUNNING)); + assertTrue(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPING.isValidChange(OperationMode.STOPPING)); + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPED)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java new file mode 100644 index 0000000000000..eea46baadf65f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class PhaseCompleteStepTests extends AbstractStepTestCase { + + @Override + public PhaseCompleteStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new PhaseCompleteStep(stepKey, nextStepKey); + } + + @Override + public PhaseCompleteStep mutateInstance(PhaseCompleteStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + if (randomBoolean()) { + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } else { + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } + + return new PhaseCompleteStep(key, nextKey); + } + + @Override + public PhaseCompleteStep copyInstance(PhaseCompleteStep instance) { + return new PhaseCompleteStep(instance.getKey(), instance.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java new file mode 100644 index 0000000000000..9198282a0717c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Serialization round-trip tests for {@link PhaseExecutionInfo}, which records
 * the phase definition an index is currently executing along with the policy
 * name, version and modification date it came from.
 */
public class PhaseExecutionInfoTests extends AbstractSerializingTestCase<PhaseExecutionInfo> {

    static PhaseExecutionInfo randomPhaseExecutionInfo(String phaseName) {
        return new PhaseExecutionInfo(randomAlphaOfLength(5), PhaseTests.randomTestPhase(phaseName),
            randomNonNegativeLong(), randomNonNegativeLong());
    }

    // Phase name shared between createTestInstance() and doParseInstance().
    String phaseName;

    @Before
    public void setupPhaseName() {
        phaseName = randomAlphaOfLength(7);
    }

    @Override
    protected PhaseExecutionInfo createTestInstance() {
        return randomPhaseExecutionInfo(phaseName);
    }

    @Override
    protected Reader<PhaseExecutionInfo> instanceReader() {
        return PhaseExecutionInfo::new;
    }

    @Override
    protected PhaseExecutionInfo doParseInstance(XContentParser parser) throws IOException {
        return PhaseExecutionInfo.parse(parser, phaseName);
    }

    @Override
    protected PhaseExecutionInfo mutateInstance(PhaseExecutionInfo instance) throws IOException {
        String policyName = instance.getPolicyName();
        Phase phase = instance.getPhase();
        long version = instance.getVersion();
        long modifiedDate = instance.getModifiedDate();
        // Mutate exactly one of the four components.
        switch (between(0, 3)) {
            case 0:
                policyName = policyName + randomAlphaOfLengthBetween(1, 5);
                break;
            case 1:
                phase = randomValueOtherThan(phase, () -> PhaseTests.randomTestPhase(randomAlphaOfLength(6)));
                break;
            case 2:
                version++;
                break;
            case 3:
                modifiedDate++;
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new PhaseExecutionInfo(policyName, phase, version, modifiedDate);
    }

    @Override
    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        return new NamedWriteableRegistry(Arrays
            .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)));
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse));
        return new NamedXContentRegistry(entries);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats;

import java.io.IOException;
import java.util.Arrays;

/**
 * Wire-serialization round-trip tests for
 * {@link IndexLifecycleFeatureSetUsage.PhaseStats}.
 */
public class PhaseStatsTests extends AbstractWireSerializingTestCase<PhaseStats> {

    @Override
    protected PhaseStats createTestInstance() {
        return createRandomInstance();
    }

    public static PhaseStats createRandomInstance() {
        TimeValue after = TimeValue.parseTimeValue(randomTimeValue(), "phase_stats_tests");
        String[] actionNames = randomArray(0, 20, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 20));
        return new PhaseStats(after, actionNames);
    }

    @Override
    protected PhaseStats mutateInstance(PhaseStats instance) throws IOException {
        TimeValue after = instance.getAfter();
        String[] actionNames = instance.getActionNames();
        // Mutate exactly one of the two components.
        switch (between(0, 1)) {
            case 0:
                // context string fixed: it previously said "rollover_action_test"
                // (copied from the rollover tests)
                after = randomValueOtherThan(after, () -> TimeValue.parseTimeValue(randomPositiveTimeValue(), "phase_stats_tests"));
                break;
            case 1:
                actionNames = randomValueOtherThanMany(a -> Arrays.equals(a, instance.getActionNames()),
                    () -> randomArray(0, 20, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 20)));
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new PhaseStats(after, actionNames);
    }

    @Override
    protected Reader<PhaseStats> instanceReader() {
        return PhaseStats::new;
    }

}
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class PhaseTests extends AbstractSerializingTestCase { + private String phaseName; + + @Before + public void setup() { + phaseName = randomAlphaOfLength(20); + } + + @Override + protected Phase createTestInstance() { + return randomTestPhase(phaseName); + } + + static Phase randomTestPhase(String phaseName) { + TimeValue after = null; + if (randomBoolean()) { + after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + } + Map actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(MockAction.NAME, new MockAction()); + } + return new Phase(phaseName, after, actions); + } + + @Override + protected Phase doParseInstance(XContentParser parser) throws IOException { + return Phase.parse(parser, phaseName); + } + + @Override + protected Reader instanceReader() { + return Phase::new; + } + + protected 
NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Arrays + .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)); + return new NamedXContentRegistry(entries); + } + + @Override + protected Phase mutateInstance(Phase instance) throws IOException { + String name = instance.getName(); + TimeValue after = instance.getMinimumAge(); + Map actions = instance.getActions(); + switch (between(0, 2)) { + case 0: + name = name + randomAlphaOfLengthBetween(1, 5); + break; + case 1: + after = TimeValue.timeValueSeconds(after.getSeconds() + randomIntBetween(1, 1000)); + break; + case 2: + actions = new HashMap<>(actions); + actions.put(MockAction.NAME + "another", new MockAction(Collections.emptyList())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new Phase(name, after, actions); + } + + public void testDefaultAfter() { + Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + assertEquals(TimeValue.ZERO, phase.getMinimumAge()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java new file mode 100644 index 0000000000000..29a6912a7370f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class PolicyStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected PolicyStats createTestInstance() { + return createRandomInstance(); + } + + public static PolicyStats createRandomInstance() { + int size = randomIntBetween(0, 10); + Map phaseStats = new HashMap<>(size); + for (int i = 0; i < size; i++) { + phaseStats.put(randomAlphaOfLengthBetween(1, 20), PhaseStatsTests.createRandomInstance()); + } + return new PolicyStats(phaseStats, randomIntBetween(0, 100)); + } + + @Override + protected PolicyStats mutateInstance(PolicyStats instance) throws IOException { + Map phaseStats = instance.getPhaseStats(); + int indicesManaged = instance.getIndicesManaged(); + switch (between(0, 1)) { + case 0: + phaseStats = new HashMap<>(instance.getPhaseStats()); + phaseStats.put(randomAlphaOfLengthBetween(21, 25), PhaseStatsTests.createRandomInstance()); + break; + case 1: + indicesManaged += randomIntBetween(1, 10); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new PolicyStats(phaseStats, indicesManaged); + } + + @Override + protected Reader instanceReader() { + return PolicyStats::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java new file mode 100644 index 
0000000000000..1d28985fac1db --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ReadOnlyActionTests extends AbstractActionTestCase { + + @Override + protected ReadOnlyAction doParseInstance(XContentParser parser) { + return ReadOnlyAction.parse(parser); + } + + @Override + protected ReadOnlyAction createTestInstance() { + return new ReadOnlyAction(); + } + + @Override + protected Reader instanceReader() { + return ReadOnlyAction::new; + } + + public void testToSteps() { + ReadOnlyAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(1, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, ReadOnlyAction.NAME, ReadOnlyAction.NAME); + UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0); + assertThat(firstStep.getKey(), equalTo(expectedFirstStepKey)); + assertThat(firstStep.getNextStepKey(), equalTo(nextStepKey)); + assertThat(firstStep.getSettings().size(), equalTo(1)); + assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(firstStep.getSettings())); + } + 
+} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java new file mode 100644 index 0000000000000..f13a09ac7476e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.List; + +public class RolloverActionTests extends AbstractActionTestCase { + + @Override + protected RolloverAction doParseInstance(XContentParser parser) throws IOException { + return RolloverAction.parse(parser); + } + + @Override + protected RolloverAction createTestInstance() { + return randomInstance(); + } + + static RolloverAction randomInstance() { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? 
TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + @Override + protected Reader instanceReader() { + return RolloverAction::new; + } + + @Override + protected RolloverAction mutateInstance(RolloverAction instance) throws IOException { + ByteSizeValue maxSize = instance.getMaxSize(); + TimeValue maxAge = instance.getMaxAge(); + Long maxDocs = instance.getMaxDocs(); + switch (between(0, 2)) { + case 0: + maxSize = randomValueOtherThan(maxSize, () -> { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + }); + break; + case 1: + maxAge = randomValueOtherThan(maxAge, + () -> TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test")); + break; + case 2: + maxDocs = maxDocs == null ? randomNonNegativeLong() : maxDocs + 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + public void testNoConditions() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new RolloverAction(null, null, null)); + assertEquals("At least one rollover condition must be set.", exception.getMessage()); + } + + public void testToSteps() { + RolloverAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(2, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, RolloverAction.NAME, RolloverStep.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, RolloverAction.NAME, UpdateRolloverLifecycleDateStep.NAME); + RolloverStep firstStep = (RolloverStep) 
steps.get(0); + UpdateRolloverLifecycleDateStep secondStep = (UpdateRolloverLifecycleDateStep) steps.get(1); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(secondStep.getKey(), firstStep.getNextStepKey()); + assertEquals(action.getMaxSize(), firstStep.getMaxSize()); + assertEquals(action.getMaxAge(), firstStep.getMaxAge()); + assertEquals(action.getMaxDocs(), firstStep.getMaxDocs()); + assertEquals(nextStepKey, secondStep.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java new file mode 100644 index 0000000000000..596099e6e275b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java @@ -0,0 +1,341 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.Condition; +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class RolloverStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public RolloverStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? 
null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverStep(stepKey, nextStepKey, client, maxSize, maxAge, maxDocs); + } + + @Override + public RolloverStep mutateInstance(RolloverStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + ByteSizeValue maxSize = instance.getMaxSize(); + TimeValue maxAge = instance.getMaxAge(); + Long maxDocs = instance.getMaxDocs(); + + switch (between(0, 4)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxSize = randomValueOtherThan(maxSize, () -> { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + }); + break; + case 3: + maxAge = TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test"); + break; + case 4: + maxDocs = randomNonNegativeLong(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new RolloverStep(key, nextKey, instance.getClient(), maxSize, maxAge, maxDocs); + } + + @Override + public RolloverStep copyInstance(RolloverStep instance) { + return new RolloverStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), + instance.getMaxSize(), instance.getMaxAge(), instance.getMaxDocs()); + } + + private static void assertRolloverIndexRequest(RolloverRequest request, String alias, Set> expectedConditions) { + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(alias, request.indices()[0]); + 
assertEquals(alias, request.getAlias()); + assertEquals(expectedConditions.size(), request.getConditions().size()); + Set expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet()); + Set actualConditionValues = request.getConditions().values().stream() + .map(Condition::value).collect(Collectors.toSet()); + assertEquals(expectedConditionValues, actualConditionValues); + } + + public void testPerformAction() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true)); + return null; + } + + 
}).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionNotComplete() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new 
MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), false, true, true)); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(false, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionFailure() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> 
expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionInvalidNullOrEmptyAlias() { + String alias = randomBoolean() ? 
"" : null; + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, + indexMetaData.getIndex().getName()))); + } + + public void testPerformActionAliasDoesNotPointToIndex() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias, + 
indexMetaData.getIndex().getName()))); + + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java new file mode 100644 index 0000000000000..a5c1813de2153 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.SegmentCountStep.Info; + +import java.io.IOException; + +public class SegmentCountStepInfoTests extends AbstractXContentTestCase { + + @Override + protected Info createTestInstance() { + return new Info(randomNonNegativeLong()); + } + + @Override + protected Info doParseInstance(XContentParser parser) throws IOException { + return Info.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public final void testEqualsAndHashcode() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance); + } + } + + protected final Info copyInstance(Info instance) throws IOException { + return new Info(instance.getNumberShardsLeftToMerge()); + } + + protected Info mutateInstance(Info instance) throws IOException { + return createTestInstance(); + } + +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java new file mode 100644 index 0000000000000..ae0551020fbd1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java @@ -0,0 +1,236 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Spliterator; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; + +public class SegmentCountStepTests extends AbstractStepTestCase { + + @Override + public 
SegmentCountStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + + return new SegmentCountStep(stepKey, nextStepKey, null, maxNumSegments); + } + + private IndexMetaData makeMeta(Index index) { + return IndexMetaData.builder(index.getName()) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .build(); + } + + @Override + public SegmentCountStep mutateInstance(SegmentCountStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + int maxNumSegments = instance.getMaxNumSegments(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxNumSegments += 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new SegmentCountStep(key, nextKey, null, maxNumSegments); + } + + @Override + public SegmentCountStep copyInstance(SegmentCountStep instance) { + return new SegmentCountStep(instance.getKey(), instance.getNextStepKey(), null, instance.getMaxNumSegments()); + } + + public void testIsConditionMet() { + int maxNumSegments = randomIntBetween(3, 10); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + IndicesSegmentResponse indicesSegmentResponse = Mockito.mock(IndicesSegmentResponse.class); + IndexSegments indexSegments = Mockito.mock(IndexSegments.class); + IndexShardSegments 
indexShardSegments = Mockito.mock(IndexShardSegments.class); + Map indexShards = Collections.singletonMap(0, indexShardSegments); + ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); + ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; + Spliterator iss = indexShards.values().spliterator(); + List segments = new ArrayList<>(); + for (int i = 0; i < maxNumSegments - randomIntBetween(0, 3); i++) { + segments.add(null); + } + Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indexSegments.spliterator()).thenReturn(iss); + Mockito.when(indexShardSegments.getShards()).thenReturn(shardSegmentsArray); + Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(indicesSegmentResponse); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce conditionMetResult = new SetOnce<>(); + SetOnce conditionInfo = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + conditionMetResult.set(conditionMet); + conditionInfo.set(info); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("unexpected method call"); + } + }); + + assertTrue(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(0L), conditionInfo.get()); 
+ } + + public void testIsConditionFails() { + int maxNumSegments = randomIntBetween(3, 10); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + IndicesSegmentResponse indicesSegmentResponse = Mockito.mock(IndicesSegmentResponse.class); + IndexSegments indexSegments = Mockito.mock(IndexSegments.class); + IndexShardSegments indexShardSegments = Mockito.mock(IndexShardSegments.class); + Map indexShards = Collections.singletonMap(0, indexShardSegments); + ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); + ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; + Spliterator iss = indexShards.values().spliterator(); + List segments = new ArrayList<>(); + for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { + segments.add(null); + } + Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indexSegments.spliterator()).thenReturn(iss); + Mockito.when(indexShardSegments.getShards()).thenReturn(shardSegmentsArray); + Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(indicesSegmentResponse); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce conditionMetResult = new SetOnce<>(); + SetOnce conditionInfo = new 
SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + conditionMetResult.set(conditionMet); + conditionInfo.set(info); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("unexpected method call"); + } + }); + + assertFalse(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(1L), conditionInfo.get()); + } + + public void testThrowsException() { + Exception exception = new RuntimeException("error"); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(3, 10); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onFailure(exception); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce exceptionThrown = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + throw new AssertionError("unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, equalTo(exception)); + exceptionThrown.set(true); + } + }); + + assertTrue(exceptionThrown.get()); + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java new file mode 100644 index 0000000000000..b42ada6956f87 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import 
org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; + +public class SetSingleNodeAllocateStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + protected SetSingleNodeAllocateStep createRandomInstance() { + return new SetSingleNodeAllocateStep(randomStepKey(), randomStepKey(), client); + } + + @Override + protected SetSingleNodeAllocateStep mutateInstance(SetSingleNodeAllocateStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new SetSingleNodeAllocateStep(key, nextKey, instance.getClient()); + } + + @Override + protected SetSingleNodeAllocateStep copyInstance(SetSingleNodeAllocateStep instance) { + return new SetSingleNodeAllocateStep(instance.getKey(), instance.getNextStepKey(), client); + } + + public static void assertSettingsRequestContainsValueFrom(UpdateSettingsRequest request, String settingsKey, + Set acceptableValues, boolean assertOnlyKeyInSettings, + String... 
expectedIndices) { + assertNotNull(request); + assertArrayEquals(expectedIndices, request.indices()); + assertThat(request.settings().get(settingsKey), anyOf(acceptableValues.stream().map(e -> equalTo(e)).collect(Collectors.toList()))); + if (assertOnlyKeyInSettings) { + assertEquals(1, request.settings().size()); + } + } + + public void testPerformActionNoAttrs() throws IOException { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(); + nodes.add( + DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsAllNodesValid() throws IOException { + int numAttrs = randomIntBetween(1, 10); + String[][] validAttrs = new String[numAttrs][2]; + for (int i = 0; i < numAttrs; i++) { + validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) }; + } + Settings.Builder indexSettings = settings(Version.CURRENT); + for (String[] attr : validAttrs) { + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); + } + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 
5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + String[] nodeAttr = randomFrom(validAttrs); + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); + nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsSomeNodesValid() throws IOException { + String[] validAttr = new String[] { "box_type", "valid" }; + String[] invalidAttr = new String[] { "box_type", "not_valid" }; + Settings.Builder indexSettings = settings(Version.CURRENT); + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + validAttr[0], validAttr[1]); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + validAttr[0], validAttr[1]).build(); + Settings invalidNodeSettings = Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + invalidAttr[0], invalidAttr[1]).build(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int 
nodePort = 9300 + i; + Builder nodeSettingsBuilder = Settings.builder(); + // randomise whether the node had valid attributes or not but make sure at least one node is valid + if (randomBoolean() || (i == numNodes - 1 && validNodeIds.isEmpty())) { + nodeSettingsBuilder.put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + validNodeIds.add(nodeId); + } else { + nodeSettingsBuilder.put(invalidNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + } + nodes.add(DiscoveryNode.createLocal(nodeSettingsBuilder.build(), new TransportAddress(TransportAddress.META_ADDRESS, nodePort), + nodeId)); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsNoNodesValid() { + String[] validAttr = new String[] { "box_type", "valid" }; + String[] invalidAttr = new String[] { "box_type", "not_valid" }; + Settings.Builder indexSettings = settings(Version.CURRENT); + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + validAttr[0], validAttr[1]); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Settings invalidNodeSettings = Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + invalidAttr[0], invalidAttr[1]).build(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + Builder nodeSettingsBuilder = Settings.builder().put(invalidNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + nodes.add(DiscoveryNode.createLocal(nodeSettingsBuilder.build(), new TransportAddress(TransportAddress.META_ADDRESS, nodePort), + nodeId)); + } + + assertNoValidNode(indexMetaData, index, nodes); + } + + public void 
testPerformActionAttrsRequestFails() { + int numAttrs = randomIntBetween(1, 10); + String[][] validAttrs = new String[numAttrs][2]; + for (int i = 0; i < numAttrs; i++) { + validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) }; + } + Settings.Builder indexSettings = settings(Version.CURRENT); + for (String[] attr : validAttrs) { + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); + } + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + String[] nodeAttr = randomFrom(validAttrs); + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); + nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder().fPut(index.getName(), + indexMetaData); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED)); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + + SetSingleNodeAllocateStep step = createRandomInstance(); + Exception exception = new RuntimeException(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertSettingsRequestContainsValueFrom(request, + IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true, + indexMetaData.getIndex().getName()); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, clusterState, new Listener() { + + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, 
Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } + + public void testPerformActionAttrsNoShard() { + int numAttrs = randomIntBetween(1, 10); + String[][] validAttrs = new String[numAttrs][2]; + for (int i = 0; i < numAttrs; i++) { + validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) }; + } + Settings.Builder indexSettings = settings(Version.CURRENT); + for (String[] attr : validAttrs) { + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); + } + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + String[] nodeAttr = randomFrom(validAttrs); + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); + nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder().fPut(index.getName(), + indexMetaData); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + + SetSingleNodeAllocateStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, clusterState, new Listener() { + + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, Matchers.instanceOf(IndexNotFoundException.class)); + assertEquals(indexMetaData.getIndex(), ((IndexNotFoundException) e).getIndex()); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verifyZeroInteractions(client); + } + + private void assertNodeSelected(IndexMetaData indexMetaData, Index index, + Set validNodeIds, DiscoveryNodes.Builder nodes) throws IOException { + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder().fPut(index.getName(), + indexMetaData); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED)); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + + SetSingleNodeAllocateStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertSettingsRequestContainsValueFrom(request, + IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true, + indexMetaData.getIndex().getName()); + listener.onResponse(new AcknowledgedResponse(true)); + return null; + } + + }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + + step.performAction(indexMetaData, clusterState, new Listener() { + + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, 
Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } + + private void assertNoValidNode(IndexMetaData indexMetaData, Index index, DiscoveryNodes.Builder nodes) { + ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder().fPut(index.getName(), + indexMetaData); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED)); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + + SetSingleNodeAllocateStep step = createRandomInstance(); + + SetOnce actionCompleted = new SetOnce<>(); + + step.performAction(indexMetaData, clusterState, new Listener() { + + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(false, actionCompleted.get()); + + Mockito.verifyZeroInteractions(client); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java new file mode 100644 index 0000000000000..037638ba56bff --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrinkActionTests extends AbstractActionTestCase { + + @Override + protected ShrinkAction doParseInstance(XContentParser parser) throws IOException { + return ShrinkAction.parse(parser); + } + + @Override + protected ShrinkAction createTestInstance() { + return randomInstance(); + } + + static ShrinkAction randomInstance() { + return new ShrinkAction(randomIntBetween(1, 100)); + } + + @Override + protected ShrinkAction mutateInstance(ShrinkAction action) { + return new ShrinkAction(action.getNumberOfShards() + randomIntBetween(1, 2)); + } + + @Override + protected Reader instanceReader() { + return ShrinkAction::new; + } + + public void testNonPositiveShardNumber() { + Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0))); + assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0")); + } + + public void testToSteps() { + ShrinkAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertThat(steps.size(), equalTo(8)); + StepKey expectedFirstKey = new StepKey(phase, ShrinkAction.NAME, ReadOnlyAction.NAME); + StepKey expectedSecondKey = new StepKey(phase, ShrinkAction.NAME, SetSingleNodeAllocateStep.NAME); + StepKey expectedThirdKey = new StepKey(phase, ShrinkAction.NAME, AllocationRoutedStep.NAME); + StepKey expectedFourthKey = new StepKey(phase, 
ShrinkAction.NAME, ShrinkStep.NAME); + StepKey expectedFifthKey = new StepKey(phase, ShrinkAction.NAME, ShrunkShardsAllocatedStep.NAME); + StepKey expectedSixthKey = new StepKey(phase, ShrinkAction.NAME, CopyExecutionStateStep.NAME); + StepKey expectedSeventhKey = new StepKey(phase, ShrinkAction.NAME, ShrinkSetAliasStep.NAME); + StepKey expectedEighthKey = new StepKey(phase, ShrinkAction.NAME, ShrunkenIndexCheckStep.NAME); + + assertTrue(steps.get(0) instanceof UpdateSettingsStep); + assertThat(steps.get(0).getKey(), equalTo(expectedFirstKey)); + assertThat(steps.get(0).getNextStepKey(), equalTo(expectedSecondKey)); + assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(((UpdateSettingsStep)steps.get(0)).getSettings())); + + assertTrue(steps.get(1) instanceof SetSingleNodeAllocateStep); + assertThat(steps.get(1).getKey(), equalTo(expectedSecondKey)); + assertThat(steps.get(1).getNextStepKey(), equalTo(expectedThirdKey)); + + assertTrue(steps.get(2) instanceof AllocationRoutedStep); + assertThat(steps.get(2).getKey(), equalTo(expectedThirdKey)); + assertThat(steps.get(2).getNextStepKey(), equalTo(expectedFourthKey)); + assertThat(((AllocationRoutedStep) steps.get(2)).getWaitOnAllShardCopies(), equalTo(false)); + + assertTrue(steps.get(3) instanceof ShrinkStep); + assertThat(steps.get(3).getKey(), equalTo(expectedFourthKey)); + assertThat(steps.get(3).getNextStepKey(), equalTo(expectedFifthKey)); + assertThat(((ShrinkStep) steps.get(3)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + + assertTrue(steps.get(4) instanceof ShrunkShardsAllocatedStep); + assertThat(steps.get(4).getKey(), equalTo(expectedFifthKey)); + assertThat(steps.get(4).getNextStepKey(), equalTo(expectedSixthKey)); + assertThat(((ShrunkShardsAllocatedStep) steps.get(4)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + + assertTrue(steps.get(5) instanceof CopyExecutionStateStep); + assertThat(steps.get(5).getKey(), equalTo(expectedSixthKey)); + 
assertThat(steps.get(5).getNextStepKey(), equalTo(expectedSeventhKey)); + assertThat(((CopyExecutionStateStep) steps.get(5)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + + assertTrue(steps.get(6) instanceof ShrinkSetAliasStep); + assertThat(steps.get(6).getKey(), equalTo(expectedSeventhKey)); + assertThat(steps.get(6).getNextStepKey(), equalTo(expectedEighthKey)); + assertThat(((ShrinkSetAliasStep) steps.get(6)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + + assertTrue(steps.get(7) instanceof ShrunkenIndexCheckStep); + assertThat(steps.get(7).getKey(), equalTo(expectedEighthKey)); + assertThat(steps.get(7).getNextStepKey(), equalTo(nextStepKey)); + assertThat(((ShrunkenIndexCheckStep) steps.get(7)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + } + + @Override + protected boolean isSafeAction() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java new file mode 100644 index 0000000000000..5fcfcdeea38f0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrinkSetAliasStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public ShrinkSetAliasStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String shrunkIndexPrefix = randomAlphaOfLength(10); + return new ShrinkSetAliasStep(stepKey, nextStepKey, client, shrunkIndexPrefix); + } + + @Override + public ShrinkSetAliasStep mutateInstance(ShrinkSetAliasStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String shrunkIndexPrefix = instance.getShrunkIndexPrefix(); + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + shrunkIndexPrefix += 
randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new ShrinkSetAliasStep(key, nextKey, instance.getClient(), shrunkIndexPrefix); + } + + @Override + public ShrinkSetAliasStep copyInstance(ShrinkSetAliasStep instance) { + return new ShrinkSetAliasStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getShrunkIndexPrefix()); + } + + public void testPerformAction() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ShrinkSetAliasStep step = createRandomInstance(); + + String sourceIndex = indexMetaData.getIndex().getName(); + String shrunkenIndex = step.getShrunkIndexPrefix() + sourceIndex; + List expectedAliasActions = Arrays.asList( + IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndex), + IndicesAliasesRequest.AliasActions.add().index(shrunkenIndex).alias(sourceIndex)); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + IndicesAliasesRequest request = (IndicesAliasesRequest) invocation.getArguments()[0]; + assertThat(request.getAliasActions(), equalTo(expectedAliasActions)); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new AcknowledgedResponse(true)); + return null; + } + + }).when(indicesClient).aliases(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) 
{ + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertTrue(actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).aliases(Mockito.any(), Mockito.any()); + } + + public void testPerformActionFailure() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + ShrinkSetAliasStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).aliases(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).aliases(Mockito.any(), Mockito.any()); + } + +} diff --git 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
import org.junit.Before;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.Collections;

import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests for {@link ShrinkStep}: verifies that performing the step issues a resize
 * (shrink) request carrying the source index's aliases, replica count, lifecycle
 * name and cleared routing requirement, and that success, not-yet-complete and
 * failure responses from the client are propagated to the step listener.
 */
public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {

    // Mocked client shared by all instances created through createRandomInstance().
    private Client client;

    @Before
    public void setup() {
        client = Mockito.mock(Client.class);
    }

    @Override
    public ShrinkStep createRandomInstance() {
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        int numberOfShards = randomIntBetween(1, 20);
        String shrunkIndexPrefix = randomAlphaOfLength(10);
        return new ShrinkStep(stepKey, nextStepKey, client, numberOfShards, shrunkIndexPrefix);
    }

    @Override
    public ShrinkStep mutateInstance(ShrinkStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();
        int numberOfShards = instance.getNumberOfShards();
        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();

        // Mutate exactly one property so the copy is guaranteed to differ.
        switch (between(0, 3)) {
        case 0:
            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
            break;
        case 1:
            // Derive the mutated nextKey from nextKey's own fields (not key's),
            // so the new value always differs from the original nextKey.
            nextKey = new StepKey(nextKey.getPhase(), nextKey.getAction(), nextKey.getName() + randomAlphaOfLength(5));
            break;
        case 2:
            numberOfShards = numberOfShards + 1;
            break;
        case 3:
            shrunkIndexPrefix += randomAlphaOfLength(5);
            break;
        default:
            throw new AssertionError("Illegal randomisation branch");
        }

        return new ShrinkStep(key, nextKey, instance.getClient(), numberOfShards, shrunkIndexPrefix);
    }

    @Override
    public ShrinkStep copyInstance(ShrinkStep instance) {
        return new ShrinkStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getNumberOfShards(),
            instance.getShrunkIndexPrefix());
    }

    /**
     * A successful resize response must complete the step, and the issued
     * {@link ResizeRequest} must copy aliases, replicas and lifecycle name from
     * the source index while clearing the shrink routing requirement.
     */
    public void testPerformAction() throws Exception {
        String lifecycleName = randomAlphaOfLength(5);
        ShrinkStep step = createRandomInstance();
        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
        lifecycleState.setPhase(step.getKey().getPhase());
        lifecycleState.setAction(step.getKey().getAction());
        lifecycleState.setStep(step.getKey().getName());
        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
        IndexMetaData sourceIndexMetaData = IndexMetaData.builder(randomAlphaOfLength(10))
            .settings(settings(Version.CURRENT)
                .put(LifecycleSettings.LIFECYCLE_NAME, lifecycleName)
            )
            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5))
            .putAlias(AliasMetaData.builder("my_alias"))
            .build();

        AdminClient adminClient = Mockito.mock(AdminClient.class);
        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);

        Mockito.when(client.admin()).thenReturn(adminClient);
        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
        Mockito.doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                ResizeRequest request = (ResizeRequest) invocation.getArguments()[0];
                @SuppressWarnings("unchecked")
                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
                assertThat(request.getSourceIndex(), equalTo(sourceIndexMetaData.getIndex().getName()));
                assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.singleton(new Alias("my_alias"))));
                assertThat(request.getTargetIndexRequest().settings(), equalTo(Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, step.getNumberOfShards())
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetaData.getNumberOfReplicas())
                    .put(LifecycleSettings.LIFECYCLE_NAME, lifecycleName)
                    // routing requirement set while moving shards must be cleared on the target
                    .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null)
                    .build()));
                assertThat(request.getTargetIndexRequest().settings()
                    .getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), equalTo(step.getNumberOfShards()));
                listener.onResponse(new ResizeResponse(true, true, sourceIndexMetaData.getIndex().getName()));
                return null;
            }

        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());

        SetOnce<Boolean> actionCompleted = new SetOnce<>();
        step.performAction(sourceIndexMetaData, null, new Listener() {

            @Override
            public void onResponse(boolean complete) {
                actionCompleted.set(complete);
            }

            @Override
            public void onFailure(Exception e) {
                throw new AssertionError("Unexpected method call", e);
            }
        });

        assertTrue(actionCompleted.get());

        Mockito.verify(client, Mockito.only()).admin();
        Mockito.verify(adminClient, Mockito.only()).indices();
        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
    }

    /**
     * A resize response that is not acknowledged/shards-acked must surface
     * {@code complete == false} rather than an error.
     */
    public void testPerformActionNotComplete() throws Exception {
        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
        ShrinkStep step = createRandomInstance();

        AdminClient adminClient = Mockito.mock(AdminClient.class);
        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);

        Mockito.when(client.admin()).thenReturn(adminClient);
        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
        Mockito.doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                @SuppressWarnings("unchecked")
                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
                listener.onResponse(new ResizeResponse(false, false, indexMetaData.getIndex().getName()));
                return null;
            }

        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());

        SetOnce<Boolean> actionCompleted = new SetOnce<>();
        step.performAction(indexMetaData, null, new Listener() {

            @Override
            public void onResponse(boolean complete) {
                actionCompleted.set(complete);
            }

            @Override
            public void onFailure(Exception e) {
                throw new AssertionError("Unexpected method call", e);
            }
        });

        assertFalse(actionCompleted.get());

        Mockito.verify(client, Mockito.only()).admin();
        Mockito.verify(adminClient, Mockito.only()).indices();
        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
    }

    /**
     * A client-side failure must be passed to {@link Listener#onFailure} unchanged.
     */
    public void testPerformActionFailure() throws Exception {
        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
        Exception exception = new RuntimeException();
        ShrinkStep step = createRandomInstance();

        AdminClient adminClient = Mockito.mock(AdminClient.class);
        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);

        Mockito.when(client.admin()).thenReturn(adminClient);
        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
        Mockito.doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                @SuppressWarnings("unchecked")
                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
                listener.onFailure(exception);
                return null;
            }

        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());

        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
        step.performAction(indexMetaData, null, new Listener() {

            @Override
            public void onResponse(boolean complete) {
                throw new AssertionError("Unexpected method call");
            }

            @Override
            public void onFailure(Exception e) {
                // the exact exception instance must be propagated, not wrapped
                assertSame(exception, e);
                exceptionThrown.set(true);
            }
        });

        assertTrue(exceptionThrown.get());

        Mockito.verify(client, Mockito.only()).admin();
        Mockito.verify(adminClient, Mockito.only()).indices();
        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
    }

}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.test.EqualsHashCodeTestUtils;
import org.elasticsearch.xpack.core.indexlifecycle.ShrunkShardsAllocatedStep.Info;

import java.io.IOException;

/**
 * XContent round-trip and equals/hashCode tests for
 * {@link ShrunkShardsAllocatedStep.Info}, the step-info object reporting
 * whether the shrunken index exists and its shard allocation state.
 */
public class ShrunkShardsAllocatedStepInfoTests extends AbstractXContentTestCase<Info> {

    @Override
    protected Info createTestInstance() {
        return new Info(randomBoolean(), randomIntBetween(0, 10000), randomBoolean());
    }

    @Override
    protected Info doParseInstance(XContentParser parser) throws IOException {
        return Info.PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        // Info's parser is strict; unknown fields are rejected.
        return false;
    }

    public final void testEqualsAndHashcode() {
        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
            EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance);
        }
    }

    protected final Info copyInstance(Info instance) throws IOException {
        return new Info(instance.shrunkIndexExists(), instance.getActualShards(), instance.allShardsActive());
    }

    protected Info mutateInstance(Info instance) throws IOException {
        boolean shrunkIndexExists = instance.shrunkIndexExists();
        int actualShards = instance.getActualShards();
        boolean allShardsActive = instance.allShardsActive();
        // Flip or shift exactly one field so the mutant never equals the original.
        switch (between(0, 2)) {
        case 0:
            shrunkIndexExists = shrunkIndexExists == false;
            break;
        case 1:
            actualShards += between(1, 20);
            break;
        case 2:
            allShardsActive = allShardsActive == false;
            break;
        default:
            throw new AssertionError("Illegal randomisation branch");
        }
        return new Info(shrunkIndexExists, actualShards, allShardsActive);
    }

}
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class ShrunkShardsAllocatedStepTests extends AbstractStepTestCase { + + @Override + public ShrunkShardsAllocatedStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String shrunkIndexPrefix = randomAlphaOfLength(10); + return new ShrunkShardsAllocatedStep(stepKey, nextStepKey, shrunkIndexPrefix); + } + + @Override + public ShrunkShardsAllocatedStep mutateInstance(ShrunkShardsAllocatedStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String shrunkIndexPrefix = instance.getShrunkIndexPrefix(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + shrunkIndexPrefix += randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + 
return new ShrunkShardsAllocatedStep(key, nextKey, shrunkIndexPrefix); + } + + @Override + public ShrunkShardsAllocatedStep copyInstance(ShrunkShardsAllocatedStep instance) { + return new ShrunkShardsAllocatedStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix()); + } + + public void testConditionMet() { + ShrunkShardsAllocatedStep step = createRandomInstance(); + int shrinkNumberOfShards = randomIntBetween(1, 5); + int originalNumberOfShards = randomIntBetween(1, 5); + String originalIndexName = randomAlphaOfLength(5); + IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(originalNumberOfShards) + .numberOfReplicas(0).build(); + IndexMetaData shrunkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + originalIndexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(shrinkNumberOfShards) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(originalIndexMetadata)) + .put(IndexMetaData.builder(shrunkIndexMetadata)) + .build(); + Index shrinkIndex = shrunkIndexMetadata.getIndex(); + + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + + IndexRoutingTable.Builder builder = IndexRoutingTable.builder(shrinkIndex); + for (int i = 0; i < shrinkNumberOfShards; i++) { + builder.addShard(TestShardRouting.newShardRouting(new ShardId(shrinkIndex, i), + nodeId, true, ShardRoutingState.STARTED)); + } + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + 
.routingTable(RoutingTable.builder().add(builder.build()).build()).build(); + + Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); + assertTrue(result.isComplete()); + assertNull(result.getInfomationContext()); + } + + public void testConditionNotMetBecauseOfActive() { + ShrunkShardsAllocatedStep step = createRandomInstance(); + int shrinkNumberOfShards = randomIntBetween(1, 5); + int originalNumberOfShards = randomIntBetween(1, 5); + String originalIndexName = randomAlphaOfLength(5); + IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(originalNumberOfShards) + .numberOfReplicas(0).build(); + IndexMetaData shrunkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + originalIndexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(shrinkNumberOfShards) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(originalIndexMetadata)) + .put(IndexMetaData.builder(shrunkIndexMetadata)) + .build(); + Index shrinkIndex = shrunkIndexMetadata.getIndex(); + + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + + IndexRoutingTable.Builder builder = IndexRoutingTable.builder(shrinkIndex); + for (int i = 0; i < shrinkNumberOfShards; i++) { + builder.addShard(TestShardRouting.newShardRouting(new ShardId(shrinkIndex, i), + nodeId, true, ShardRoutingState.INITIALIZING)); + } + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .routingTable(RoutingTable.builder().add(builder.build()).build()).build(); + 
+ Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); + assertFalse(result.isComplete()); + assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), + result.getInfomationContext()); + } + + public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { + ShrunkShardsAllocatedStep step = createRandomInstance(); + int originalNumberOfShards = randomIntBetween(1, 5); + String originalIndexName = randomAlphaOfLength(5); + IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(originalNumberOfShards) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(originalIndexMetadata)) + .build(); + + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); + assertFalse(result.isComplete()); + assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInfomationContext()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java new file mode 100644 index 0000000000000..b4a9021af214e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java @@ -0,0 +1,47 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.ShrunkenIndexCheckStep.Info; + +import java.io.IOException; + +public class ShrunkenIndexCheckStepInfoTests extends AbstractXContentTestCase { + + @Override + protected Info createTestInstance() { + return new Info(randomAlphaOfLengthBetween(10, 20)); + } + + @Override + protected Info doParseInstance(XContentParser parser) throws IOException { + return Info.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public final void testEqualsAndHashcode() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance); + } + } + + protected final Info copyInstance(Info instance) throws IOException { + return new Info(instance.getOriginalIndexName()); + } + + protected Info mutateInstance(Info instance) throws IOException { + return new Info(randomValueOtherThan(instance.getOriginalIndexName(), () -> randomAlphaOfLengthBetween(10, 20))); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java new file mode 100644 index 0000000000000..b7268ca9b259f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java @@ -0,0 +1,127 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrunkenIndexCheckStepTests extends AbstractStepTestCase { + + @Override + public ShrunkenIndexCheckStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String shrunkIndexPrefix = randomAlphaOfLength(10); + return new ShrunkenIndexCheckStep(stepKey, nextStepKey, shrunkIndexPrefix); + } + + @Override + public ShrunkenIndexCheckStep mutateInstance(ShrunkenIndexCheckStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String shrunkIndexPrefix = instance.getShrunkIndexPrefix(); + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + shrunkIndexPrefix += randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new ShrunkenIndexCheckStep(key, nextKey, shrunkIndexPrefix); + } + + @Override + public ShrunkenIndexCheckStep copyInstance(ShrunkenIndexCheckStep instance) { + return new ShrunkenIndexCheckStep(instance.getKey(), instance.getNextStepKey(), 
instance.getShrunkIndexPrefix()); + } + + public void testConditionMet() { + ShrunkenIndexCheckStep step = createRandomInstance(); + String sourceIndex = randomAlphaOfLengthBetween(1, 10); + IndexMetaData indexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + sourceIndex) + .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY, sourceIndex)) + .numberOfShards(1) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); + assertTrue(result.isComplete()); + assertNull(result.getInfomationContext()); + } + + public void testConditionNotMetBecauseNotSameShrunkenIndex() { + ShrunkenIndexCheckStep step = createRandomInstance(); + String sourceIndex = randomAlphaOfLengthBetween(1, 10); + IndexMetaData shrinkIndexMetadata = IndexMetaData.builder(sourceIndex + "hello") + .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY, sourceIndex)) + .numberOfShards(1) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(shrinkIndexMetadata)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); + assertFalse(result.isComplete()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + } + + public void testConditionNotMetBecauseSourceIndexExists() { + ShrunkenIndexCheckStep step = createRandomInstance(); + String sourceIndex = randomAlphaOfLengthBetween(1, 10); + IndexMetaData originalIndexMetadata = 
IndexMetaData.builder(sourceIndex) + .settings(settings(Version.CURRENT)) + .numberOfShards(100) + .numberOfReplicas(0).build(); + IndexMetaData shrinkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + sourceIndex) + .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY, sourceIndex)) + .numberOfShards(1) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(originalIndexMetadata)) + .put(IndexMetaData.builder(shrinkIndexMetadata)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); + assertFalse(result.isComplete()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + } + + public void testIllegalState() { + ShrunkenIndexCheckStep step = createRandomInstance(); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> step.isConditionMet(indexMetadata.getIndex(), clusterState)); + assertThat(exception.getMessage(), + equalTo("step[is-shrunken-index] is checking an un-shrunken index[" + indexMetadata.getIndex().getName() + "]")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java new file mode 100644 index 
0000000000000..4c61f3016a13e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class StartILMRequestTests extends AbstractStreamableTestCase { + + @Override + protected StartILMRequest createBlankInstance() { + return new StartILMRequest(); + } + + @Override + protected StartILMRequest createTestInstance() { + return new StartILMRequest(); + } + + public void testValidate() { + StartILMRequest request = createTestInstance(); + assertNull(request.validate()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java new file mode 100644 index 0000000000000..ae90a150b7c4c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class StepKeyTests extends AbstractSerializingTestCase { + + @Override + public StepKey createTestInstance() { + return new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + @Override + protected Writeable.Reader instanceReader() { + return StepKey::new; + } + + @Override + protected StepKey doParseInstance(XContentParser parser) { + return StepKey.parse(parser); + } + + @Override + public StepKey mutateInstance(StepKey instance) { + String phase = instance.getPhase(); + String action = instance.getAction(); + String step = instance.getName(); + + switch (between(0, 2)) { + case 0: + phase += randomAlphaOfLength(5); + break; + case 1: + action += randomAlphaOfLength(5); + break; + case 2: + step += randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new StepKey(phase, action, step); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java new file mode 100644 index 0000000000000..be603ee33acc1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class StopILMRequestTests extends AbstractStreamableTestCase { + + @Override + protected StopILMRequest createBlankInstance() { + return new StopILMRequest(); + } + + @Override + protected StopILMRequest createTestInstance() { + return new StopILMRequest(); + } + + public void testValidate() { + StopILMRequest request = createTestInstance(); + assertNull(request.validate()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java new file mode 100644 index 0000000000000..1db1523b5cd82 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class TerminalPolicyStepTests extends AbstractStepTestCase { + + @Override + public TerminalPolicyStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new TerminalPolicyStep(stepKey, nextStepKey); + } + + @Override + public TerminalPolicyStep mutateInstance(TerminalPolicyStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + if (randomBoolean()) { + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } else { + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } + + return new TerminalPolicyStep(key, nextKey); + } + + @Override + public TerminalPolicyStep copyInstance(TerminalPolicyStep instance) { + return new TerminalPolicyStep(instance.getKey(), instance.getNextStepKey()); + } + public void testInstance() { + assertEquals(new Step.StepKey("completed", "completed", "completed"), TerminalPolicyStep.INSTANCE.getKey()); + assertNull(TerminalPolicyStep.INSTANCE.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java new file mode 100644 index 0000000000000..f68798e9331d1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * A minimal {@link LifecycleType} for tests: it accepts any collection of
 * phases as valid and orders phases/actions simply by the iteration order of
 * the maps handed to it.
 *
 * <p>All stripped generic type parameters ({@code Collection<Phase>},
 * {@code Map<String, Phase>}, {@code List<LifecycleAction>}, …) were restored;
 * the raw-typed signatures would not implement the interface correctly.
 */
public class TestLifecycleType implements LifecycleType {
    public static final TestLifecycleType INSTANCE = new TestLifecycleType();

    public static final String TYPE = "test";

    private TestLifecycleType() {
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // stateless singleton: nothing to serialize
    }

    @Override
    public String getWriteableName() {
        return TYPE;
    }

    @Override
    public void validate(Collection<Phase> phases) {
        // always valid
    }

    @Override
    public List<Phase> getOrderedPhases(Map<String, Phase> phases) {
        return new ArrayList<>(phases.values());
    }

    /**
     * Returns the phase after {@code currentPhaseName} in iteration order, or
     * {@code null} when it is the last phase.
     *
     * @throws IllegalArgumentException if the phase is not present in {@code phases}
     */
    @Override
    public String getNextPhaseName(String currentPhaseName, Map<String, Phase> phases) {
        List<String> orderedPhaseNames = getOrderedPhases(phases).stream().map(Phase::getName).collect(Collectors.toList());
        int index = orderedPhaseNames.indexOf(currentPhaseName);
        if (index < 0) {
            throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]");
        } else if (index == orderedPhaseNames.size() - 1) {
            return null;
        } else {
            return orderedPhaseNames.get(index + 1);
        }
    }

    /**
     * Returns the phase before {@code currentPhaseName} in iteration order, or
     * {@code null} when it is the first phase.
     *
     * @throws IllegalArgumentException if the phase is not present in {@code phases}
     */
    @Override
    public String getPreviousPhaseName(String currentPhaseName, Map<String, Phase> phases) {
        List<String> orderedPhaseNames = getOrderedPhases(phases).stream().map(Phase::getName).collect(Collectors.toList());
        int index = orderedPhaseNames.indexOf(currentPhaseName);
        if (index < 0) {
            throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]");
        } else if (index == 0) {
            return null;
        } else {
            return orderedPhaseNames.get(index - 1);
        }
    }

    @Override
    public List<LifecycleAction> getOrderedActions(Phase phase) {
        return new ArrayList<>(phase.getActions().values());
    }

    /**
     * Returns the action after {@code currentActionName} within {@code phase},
     * or {@code null} when it is the last action.
     *
     * @throws IllegalArgumentException if the action is not present in the phase
     */
    @Override
    public String getNextActionName(String currentActionName, Phase phase) {
        List<String> orderedActionNames = getOrderedActions(phase).stream().map(LifecycleAction::getWriteableName)
            .collect(Collectors.toList());
        int index = orderedActionNames.indexOf(currentActionName);
        if (index < 0) {
            throw new IllegalArgumentException("[" + currentActionName + "] is not a valid action for phase [" + phase.getName()
                + "] in lifecycle type [" + TYPE + "]");
        } else if (index == orderedActionNames.size() - 1) {
            return null;
        } else {
            return orderedActionNames.get(index + 1);
        }
    }
}
package org.elasticsearch.xpack.core.indexlifecycle;

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_COLD_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_DELETE_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_HOT_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_WARM_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_COLD_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_DELETE_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_HOT_ACTIONS;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_PHASES;
import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_WARM_ACTIONS;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests the phase/action validation and ordering rules of
 * {@link TimeseriesLifecycleType}.
 *
 * <p>All stripped generic type parameters ({@code Map<String, Phase>},
 * {@code Map<String, LifecycleAction>}, {@code ConcurrentMap<String,
 * LifecycleAction>}, the {@code <T>} on {@link #isSorted}) were restored —
 * behavior is otherwise unchanged, including every assertion and message.
 */
public class TimeseriesLifecycleTypeTests extends ESTestCase {

    // Canonical instances of each action type, reused across tests.
    private static final AllocateAction TEST_ALLOCATE_ACTION =
        new AllocateAction(2, Collections.singletonMap("node", "node1"), null, null);
    private static final DeleteAction TEST_DELETE_ACTION = new DeleteAction();
    private static final ForceMergeAction TEST_FORCE_MERGE_ACTION = new ForceMergeAction(1);
    private static final RolloverAction TEST_ROLLOVER_ACTION = new RolloverAction(new ByteSizeValue(1), null, null);
    private static final ShrinkAction TEST_SHRINK_ACTION = new ShrinkAction(1);
    private static final ReadOnlyAction TEST_READ_ONLY_ACTION = new ReadOnlyAction();

    /** An unknown phase name is rejected; any of the four known names is accepted. */
    public void testValidatePhases() {
        boolean invalid = randomBoolean();
        String phaseName = randomFrom("hot", "warm", "cold", "delete");
        if (invalid) {
            phaseName += randomAlphaOfLength(5);
        }
        Map<String, Phase> phases = Collections.singletonMap(phaseName,
            new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap()));
        if (invalid) {
            Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(phases.values()));
            assertThat(e.getMessage(), equalTo("Timeseries lifecycle does not support phase [" + phaseName + "]"));
        } else {
            TimeseriesLifecycleType.INSTANCE.validate(phases.values());
        }
    }

    /** The hot phase accepts only its valid actions; any other action is rejected. */
    public void testValidateHotPhase() {
        LifecycleAction invalidAction = null;
        Map<String, LifecycleAction> actions = VALID_HOT_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        if (randomBoolean()) {
            invalidAction = getTestAction(randomFrom("allocate", "forcemerge", "delete", "shrink"));
            actions.put(invalidAction.getWriteableName(), invalidAction);
        }
        Map<String, Phase> hotPhase = Collections.singletonMap("hot",
            new Phase("hot", TimeValue.ZERO, actions));

        if (invalidAction != null) {
            Exception e = expectThrows(IllegalArgumentException.class,
                () -> TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values()));
            assertThat(e.getMessage(),
                equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [hot]"));
        } else {
            TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values());
        }
    }

    /** The warm phase accepts a subset of its valid actions; rollover/delete are rejected. */
    public void testValidateWarmPhase() {
        LifecycleAction invalidAction = null;
        Map<String, LifecycleAction> actions = randomSubsetOf(VALID_WARM_ACTIONS)
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        if (randomBoolean()) {
            invalidAction = getTestAction(randomFrom("rollover", "delete"));
            actions.put(invalidAction.getWriteableName(), invalidAction);
        }
        Map<String, Phase> warmPhase = Collections.singletonMap("warm",
            new Phase("warm", TimeValue.ZERO, actions));

        if (invalidAction != null) {
            Exception e = expectThrows(IllegalArgumentException.class,
                () -> TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values()));
            assertThat(e.getMessage(),
                equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [warm]"));
        } else {
            TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values());
        }
    }

    /** The cold phase accepts a subset of its valid actions; all others are rejected. */
    public void testValidateColdPhase() {
        LifecycleAction invalidAction = null;
        Map<String, LifecycleAction> actions = randomSubsetOf(VALID_COLD_ACTIONS)
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        if (randomBoolean()) {
            invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink"));
            actions.put(invalidAction.getWriteableName(), invalidAction);
        }
        Map<String, Phase> coldPhase = Collections.singletonMap("cold",
            new Phase("cold", TimeValue.ZERO, actions));

        if (invalidAction != null) {
            Exception e = expectThrows(IllegalArgumentException.class,
                () -> TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values()));
            assertThat(e.getMessage(),
                equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [cold]"));
        } else {
            TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values());
        }
    }

    /** The delete phase accepts only the delete action; all others are rejected. */
    public void testValidateDeletePhase() {
        LifecycleAction invalidAction = null;
        Map<String, LifecycleAction> actions = VALID_DELETE_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        if (randomBoolean()) {
            invalidAction = getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink"));
            actions.put(invalidAction.getWriteableName(), invalidAction);
        }
        Map<String, Phase> deletePhase = Collections.singletonMap("delete",
            new Phase("delete", TimeValue.ZERO, actions));

        if (invalidAction != null) {
            Exception e = expectThrows(IllegalArgumentException.class,
                () -> TimeseriesLifecycleType.INSTANCE.validate(deletePhase.values()));
            assertThat(e.getMessage(),
                equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [delete]"));
        } else {
            TimeseriesLifecycleType.INSTANCE.validate(deletePhase.values());
        }
    }

    /** Any random subset of phases comes back in the canonical hot→warm→cold→delete order. */
    public void testGetOrderedPhases() {
        Map<String, Phase> phaseMap = new HashMap<>();
        for (String phaseName : randomSubsetOf(randomIntBetween(0, VALID_PHASES.size()), VALID_PHASES)) {
            phaseMap.put(phaseName, new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap()));
        }

        assertTrue(isSorted(TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap), Phase::getName, VALID_PHASES));
    }

    public void testGetOrderedActionsInvalidPhase() {
        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE
            .getOrderedActions(new Phase("invalid", TimeValue.ZERO, Collections.emptyMap())));
        assertThat(exception.getMessage(), equalTo("lifecycle type[timeseries] does not support phase[invalid]"));
    }

    public void testGetOrderedActionsHot() {
        Map<String, LifecycleAction> actions = VALID_HOT_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions);
        List<LifecycleAction> orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(hotPhase);
        assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_HOT_ACTIONS));
    }

    public void testGetOrderedActionsWarm() {
        Map<String, LifecycleAction> actions = VALID_WARM_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions);
        List<LifecycleAction> orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(warmPhase);
        assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_WARM_ACTIONS));
    }

    public void testGetOrderedActionsCold() {
        Map<String, LifecycleAction> actions = VALID_COLD_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        Phase coldPhase = new Phase("cold", TimeValue.ZERO, actions);
        List<LifecycleAction> orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(coldPhase);
        assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_COLD_ACTIONS));
    }

    public void testGetOrderedActionsDelete() {
        Map<String, LifecycleAction> actions = VALID_DELETE_ACTIONS
            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity()));
        Phase deletePhase = new Phase("delete", TimeValue.ZERO, actions);
        List<LifecycleAction> orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(deletePhase);
        assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_DELETE_ACTIONS));
    }

    /** Exhaustive table of "next phase" lookups over every subset of phases. */
    public void testGetNextPhaseName() {
        assertNextPhaseName("hot", "warm", new String[] { "hot", "warm" });
        assertNextPhaseName("hot", "warm", new String[] { "hot", "warm", "cold" });
        assertNextPhaseName("hot", "warm", new String[] { "hot", "warm", "cold", "delete" });
        assertNextPhaseName("hot", "warm", new String[] { "warm", "cold", "delete" });
        assertNextPhaseName("hot", "warm", new String[] { "warm", "cold", "delete" });
        assertNextPhaseName("hot", "warm", new String[] { "warm", "delete" });
        assertNextPhaseName("hot", "cold", new String[] { "cold", "delete" });
        assertNextPhaseName("hot", "cold", new String[] { "cold" });
        assertNextPhaseName("hot", "delete", new String[] { "hot", "delete" });
        assertNextPhaseName("hot", "delete", new String[] { "delete" });
        assertNextPhaseName("hot", null, new String[] { "hot" });
        assertNextPhaseName("hot", null, new String[] {});

        assertNextPhaseName("warm", "cold", new String[] { "hot", "warm", "cold", "delete" });
        assertNextPhaseName("warm", "cold", new String[] { "warm", "cold", "delete" });
        assertNextPhaseName("warm", "cold", new String[] { "cold", "delete" });
        assertNextPhaseName("warm", "cold", new String[] { "cold" });
        assertNextPhaseName("warm", "delete", new String[] { "hot", "warm", "delete" });
        assertNextPhaseName("warm", null, new String[] { "hot", "warm" });
        assertNextPhaseName("warm", null, new String[] { "warm" });
        assertNextPhaseName("warm", null, new String[] { "hot" });
        assertNextPhaseName("warm", null, new String[] {});

        assertNextPhaseName("cold", "delete", new String[] { "hot", "warm", "cold", "delete" });
        assertNextPhaseName("cold", "delete", new String[] { "warm", "cold", "delete" });
        assertNextPhaseName("cold", "delete", new String[] { "cold", "delete" });
        assertNextPhaseName("cold", "delete", new String[] { "delete" });
        assertNextPhaseName("cold", "delete", new String[] { "hot", "warm", "delete" });
        assertNextPhaseName("cold", null, new String[] { "hot", "warm", "cold" });
        assertNextPhaseName("cold", null, new String[] { "hot", "warm" });
        assertNextPhaseName("cold", null, new String[] { "cold" });
        assertNextPhaseName("cold", null, new String[] { "hot" });
        assertNextPhaseName("cold", null, new String[] {});

        assertNextPhaseName("delete", null, new String[] { "hot", "warm", "cold" });
        assertNextPhaseName("delete", null, new String[] { "hot", "warm" });
        assertNextPhaseName("delete", null, new String[] { "cold" });
        assertNextPhaseName("delete", null, new String[] { "hot" });
        assertNextPhaseName("delete", null, new String[] {});
        assertNextPhaseName("delete", null, new String[] { "hot", "warm", "cold", "delete" });
        assertNextPhaseName("delete", null, new String[] { "hot", "warm", "delete" });
        assertNextPhaseName("delete", null, new String[] { "cold", "delete" });
        assertNextPhaseName("delete", null, new String[] { "delete" });
        assertNextPhaseName("delete", null, new String[] {});

        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
            () -> TimeseriesLifecycleType.INSTANCE.getNextPhaseName("foo", Collections.emptyMap()));
        assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage());
        exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE
            .getNextPhaseName("foo", Collections.singletonMap("foo", new Phase("foo", TimeValue.ZERO, Collections.emptyMap()))));
        assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage());
    }

    /** Exhaustive table of "previous phase" lookups over every subset of phases. */
    public void testGetPreviousPhaseName() {
        assertPreviousPhaseName("hot", null, new String[] { "hot", "warm" });
        assertPreviousPhaseName("hot", null, new String[] { "hot", "warm", "cold" });
        assertPreviousPhaseName("hot", null, new String[] { "hot", "warm", "cold", "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "warm", "cold", "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "warm", "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "cold", "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "cold" });
        assertPreviousPhaseName("hot", null, new String[] { "hot", "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "delete" });
        assertPreviousPhaseName("hot", null, new String[] { "hot" });
        assertPreviousPhaseName("hot", null, new String[] {});

        assertPreviousPhaseName("warm", "hot", new String[] { "hot", "warm", "cold", "delete" });
        assertPreviousPhaseName("warm", null, new String[] { "warm", "cold", "delete" });
        assertPreviousPhaseName("warm", "hot", new String[] { "hot", "cold", "delete" });
        assertPreviousPhaseName("warm", null, new String[] { "cold", "delete" });
        assertPreviousPhaseName("warm", "hot", new String[] { "hot", "delete" });
        assertPreviousPhaseName("warm", null, new String[] { "delete" });
        assertPreviousPhaseName("warm", "hot", new String[] { "hot" });
        assertPreviousPhaseName("warm", null, new String[] {});

        assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm", "cold", "delete" });
        assertPreviousPhaseName("cold", "hot", new String[] { "hot", "cold", "delete" });
        assertPreviousPhaseName("cold", "warm", new String[] { "warm", "cold", "delete" });
        assertPreviousPhaseName("cold", null, new String[] { "cold", "delete" });
        assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm", "delete" });
        assertPreviousPhaseName("cold", "hot", new String[] { "hot", "delete" });
        assertPreviousPhaseName("cold", "warm", new String[] { "warm", "delete" });
        assertPreviousPhaseName("cold", null, new String[] { "delete" });
        assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm" });
        assertPreviousPhaseName("cold", "hot", new String[] { "hot" });
        assertPreviousPhaseName("cold", "warm", new String[] { "warm" });
        assertPreviousPhaseName("cold", null, new String[] {});

        assertPreviousPhaseName("delete", "cold", new String[] { "hot", "warm", "cold", "delete" });
        assertPreviousPhaseName("delete", "cold", new String[] { "warm", "cold", "delete" });
        assertPreviousPhaseName("delete", "warm", new String[] { "hot", "warm", "delete" });
        assertPreviousPhaseName("delete", "hot", new String[] { "hot", "delete" });
        assertPreviousPhaseName("delete", "cold", new String[] { "cold", "delete" });
        assertPreviousPhaseName("delete", null, new String[] { "delete" });
        assertPreviousPhaseName("delete", "cold", new String[] { "hot", "warm", "cold" });
        assertPreviousPhaseName("delete", "cold", new String[] { "warm", "cold" });
        assertPreviousPhaseName("delete", "warm", new String[] { "hot", "warm" });
        assertPreviousPhaseName("delete", "hot", new String[] { "hot" });
        assertPreviousPhaseName("delete", "cold", new String[] { "cold" });
        assertPreviousPhaseName("delete", null, new String[] {});

        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
            () -> TimeseriesLifecycleType.INSTANCE.getPreviousPhaseName("foo", Collections.emptyMap()));
        assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage());
        exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE
            .getPreviousPhaseName("foo", Collections.singletonMap("foo", new Phase("foo", TimeValue.ZERO, Collections.emptyMap()))));
        assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage());
    }

    /** Exhaustive table of "next action" lookups inside each phase. */
    public void testGetNextActionName() {
        // Hot Phase
        assertNextActionName("hot", RolloverAction.NAME, null, new String[] {});
        assertNextActionName("hot", RolloverAction.NAME, null, new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", "foo", new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", AllocateAction.NAME, new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", DeleteAction.NAME, new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", ForceMergeAction.NAME, new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", ReadOnlyAction.NAME, new String[] { RolloverAction.NAME });
        assertInvalidAction("hot", ShrinkAction.NAME, new String[] { RolloverAction.NAME });

        // Warm Phase
        assertNextActionName("warm", ReadOnlyAction.NAME, AllocateAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, ShrinkAction.NAME,
            new String[] { ReadOnlyAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, ForceMergeAction.NAME,
            new String[] { ReadOnlyAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, null, new String[] { ReadOnlyAction.NAME });

        assertNextActionName("warm", ReadOnlyAction.NAME, AllocateAction.NAME,
            new String[] { AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, ShrinkAction.NAME, new String[] { ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME });
        assertNextActionName("warm", ReadOnlyAction.NAME, null, new String[] {});

        assertNextActionName("warm", AllocateAction.NAME, ShrinkAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", AllocateAction.NAME, ForceMergeAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", AllocateAction.NAME, null, new String[] { ReadOnlyAction.NAME, AllocateAction.NAME });

        assertNextActionName("warm", AllocateAction.NAME, ShrinkAction.NAME, new String[] { ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", AllocateAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME });
        assertNextActionName("warm", AllocateAction.NAME, null, new String[] {});

        assertNextActionName("warm", ShrinkAction.NAME, ForceMergeAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertNextActionName("warm", ShrinkAction.NAME, null,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME });

        assertNextActionName("warm", ShrinkAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME });
        assertNextActionName("warm", ShrinkAction.NAME, null, new String[] {});

        assertNextActionName("warm", ForceMergeAction.NAME, null,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });

        assertNextActionName("warm", ForceMergeAction.NAME, null, new String[] {});

        assertInvalidAction("warm", "foo", new String[] { RolloverAction.NAME });
        assertInvalidAction("warm", DeleteAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });
        assertInvalidAction("warm", RolloverAction.NAME,
            new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME });

        // Cold Phase
        assertNextActionName("cold", AllocateAction.NAME, null, new String[] { AllocateAction.NAME });

        assertNextActionName("cold", AllocateAction.NAME, null, new String[] {});

        assertNextActionName("cold", AllocateAction.NAME, null, new String[] {});

        assertInvalidAction("cold", "foo", new String[] { AllocateAction.NAME });
        assertInvalidAction("cold", DeleteAction.NAME, new String[] { AllocateAction.NAME });
        assertInvalidAction("cold", ForceMergeAction.NAME, new String[] { AllocateAction.NAME });
        assertInvalidAction("cold", ReadOnlyAction.NAME, new String[] { AllocateAction.NAME });
        assertInvalidAction("cold", RolloverAction.NAME, new String[] { AllocateAction.NAME });
        assertInvalidAction("cold", ShrinkAction.NAME, new String[] { AllocateAction.NAME });

        // Delete Phase
        assertNextActionName("delete", DeleteAction.NAME, null, new String[] {});
        assertNextActionName("delete", DeleteAction.NAME, null, new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", "foo", new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", AllocateAction.NAME, new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", ForceMergeAction.NAME, new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", ReadOnlyAction.NAME, new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", RolloverAction.NAME, new String[] { DeleteAction.NAME });
        assertInvalidAction("delete", ShrinkAction.NAME, new String[] { DeleteAction.NAME });

        Phase phase = new Phase("foo", TimeValue.ZERO, Collections.emptyMap());
        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
            () -> TimeseriesLifecycleType.INSTANCE.getNextActionName(ShrinkAction.NAME, phase));
        assertEquals("lifecycle type[" + TimeseriesLifecycleType.TYPE + "] does not support phase[" + phase.getName() + "]",
            exception.getMessage());
    }

    /** Asserts the next-action lookup for {@code currentAction} within a synthetic phase. */
    private void assertNextActionName(String phaseName, String currentAction, String expectedNextAction, String... availableActionNames) {
        Map<String, LifecycleAction> availableActions = convertActionNamesToActions(availableActionNames);
        Phase phase = new Phase(phaseName, TimeValue.ZERO, availableActions);
        String nextAction = TimeseriesLifecycleType.INSTANCE.getNextActionName(currentAction, phase);
        assertEquals(expectedNextAction, nextAction);
    }

    /** Asserts that looking up {@code currentAction} in the given phase is rejected. */
    private void assertInvalidAction(String phaseName, String currentAction, String... availableActionNames) {
        Map<String, LifecycleAction> availableActions = convertActionNamesToActions(availableActionNames);
        Phase phase = new Phase(phaseName, TimeValue.ZERO, availableActions);
        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
            () -> TimeseriesLifecycleType.INSTANCE.getNextActionName(currentAction, phase));
        assertEquals("[" + currentAction + "] is not a valid action for phase [" + phaseName + "] in lifecycle type ["
            + TimeseriesLifecycleType.TYPE + "]", exception.getMessage());
    }

    /** Builds a concrete action instance for each known action name. */
    private ConcurrentMap<String, LifecycleAction> convertActionNamesToActions(String... availableActionNames) {
        return Arrays.asList(availableActionNames).stream().map(n -> {
            switch (n) {
            case AllocateAction.NAME:
                return new AllocateAction(null, Collections.singletonMap("foo", "bar"), Collections.emptyMap(), Collections.emptyMap());
            case DeleteAction.NAME:
                return new DeleteAction();
            case ForceMergeAction.NAME:
                return new ForceMergeAction(1);
            case ReadOnlyAction.NAME:
                return new ReadOnlyAction();
            case RolloverAction.NAME:
                return new RolloverAction(ByteSizeValue.parseBytesSizeValue("0b", "test"), TimeValue.ZERO, 1L);
            case ShrinkAction.NAME:
                return new ShrinkAction(1);
            }
            return new DeleteAction();
        }).collect(Collectors.toConcurrentMap(LifecycleAction::getWriteableName, Function.identity()));
    }

    private void assertNextPhaseName(String currentPhase, String expectedNextPhase, String... availablePhaseNames) {
        Map<String, Phase> availablePhases = Arrays.asList(availablePhaseNames).stream()
            .map(n -> new Phase(n, TimeValue.ZERO, Collections.emptyMap()))
            .collect(Collectors.toMap(Phase::getName, Function.identity()));
        String nextPhase = TimeseriesLifecycleType.INSTANCE.getNextPhaseName(currentPhase, availablePhases);
        assertEquals(expectedNextPhase, nextPhase);
    }

    private void assertPreviousPhaseName(String currentPhase, String expectedNextPhase, String... availablePhaseNames) {
        Map<String, Phase> availablePhases = Arrays.asList(availablePhaseNames).stream()
            .map(n -> new Phase(n, TimeValue.ZERO, Collections.emptyMap()))
            .collect(Collectors.toMap(Phase::getName, Function.identity()));
        String nextPhase = TimeseriesLifecycleType.INSTANCE.getPreviousPhaseName(currentPhase, availablePhases);
        assertEquals(expectedNextPhase, nextPhase);
    }

    /**
     * checks whether an ordered list of objects (usually Phase and LifecycleAction) are found in the same
     * order as the ordered VALID_PHASES/VALID_HOT_ACTIONS/... lists
     * @param orderedObjects the ordered objects to verify sort order of
     * @param getKey the way to retrieve the key to sort against (Phase#getName, LifecycleAction#getName)
     * @param validOrderedKeys the source of truth of the sort order
     * @param <T> the type of object
     */
    private <T> boolean isSorted(List<T> orderedObjects, Function<T, String> getKey, List<String> validOrderedKeys) {
        int validIndex = 0;
        for (T obj : orderedObjects) {
            String key = getKey.apply(obj);
            int i = validIndex;
            for (; i < validOrderedKeys.size(); i++) {
                if (validOrderedKeys.get(i).equals(key)) {
                    validIndex = i;
                    break;
                }
            }
            // key was never found at or after the current position -> out of order
            if (i == validOrderedKeys.size()) {
                return false;
            }
        }
        return true;
    }

    /** Returns the canonical test instance for each known action name. */
    private LifecycleAction getTestAction(String actionName) {
        switch (actionName) {
        case AllocateAction.NAME:
            return TEST_ALLOCATE_ACTION;
        case DeleteAction.NAME:
            return TEST_DELETE_ACTION;
        case ForceMergeAction.NAME:
            return TEST_FORCE_MERGE_ACTION;
        case ReadOnlyAction.NAME:
            return TEST_READ_ONLY_ACTION;
        case RolloverAction.NAME:
            return TEST_ROLLOVER_ACTION;
        case ShrinkAction.NAME:
            return TEST_SHRINK_ACTION;
        default:
            throw new IllegalArgumentException("unsupported timeseries phase action [" + actionName + "]");
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle;


import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

import java.util.Collections;

import static org.hamcrest.Matchers.equalTo;

/**
 * Tests for {@link UpdateRolloverLifecycleDateStep}: the step that copies the
 * rollover time recorded in an index's {@link RolloverInfo} into the lifecycle
 * execution state of the index carrying the rollover alias.
 */
public class UpdateRolloverLifecycleDateStepTests extends AbstractStepTestCase<UpdateRolloverLifecycleDateStep> {

    @Override
    public UpdateRolloverLifecycleDateStep createRandomInstance() {
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        return new UpdateRolloverLifecycleDateStep(stepKey, nextStepKey);
    }

    /**
     * Mutates exactly one of the two step keys so the result is unequal to
     * {@code instance}.
     */
    @Override
    public UpdateRolloverLifecycleDateStep mutateInstance(UpdateRolloverLifecycleDateStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();

        if (randomBoolean()) {
            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
        } else {
            // BUG FIX: this branch previously derived the mutated nextKey from
            // 'key' (key.getPhase()/getAction()/getName()), replacing nextKey
            // wholesale with a key-based value instead of mutating nextKey.
            nextKey = new StepKey(nextKey.getPhase(), nextKey.getAction(), nextKey.getName() + randomAlphaOfLength(5));
        }

        return new UpdateRolloverLifecycleDateStep(key, nextKey);
    }

    @Override
    public UpdateRolloverLifecycleDateStep copyInstance(UpdateRolloverLifecycleDateStep instance) {
        return new UpdateRolloverLifecycleDateStep(instance.getKey(), instance.getNextStepKey());
    }

    /**
     * After rollover happened, the step must set the lifecycle date of the
     * aliased index to the rollover time stored in its {@link RolloverInfo}.
     */
    @SuppressWarnings("unchecked")
    public void testPerformAction() {
        String alias = randomAlphaOfLength(3);
        long creationDate = randomLongBetween(0, 1000000);
        long rolloverTime = randomValueOtherThan(creationDate, () -> randomNonNegativeLong());
        IndexMetaData newIndexMetaData = IndexMetaData.builder(randomAlphaOfLength(11))
            .settings(settings(Version.CURRENT)).creationDate(creationDate)
            .putAlias(AliasMetaData.builder(alias)).numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(0, 5)).build();
        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10))
            .putRolloverInfo(new RolloverInfo(alias, Collections.emptyList(), rolloverTime))
            .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
            .metaData(MetaData.builder()
                .put(indexMetaData, false)
                .put(newIndexMetaData, false)).build();

        UpdateRolloverLifecycleDateStep step = createRandomInstance();
        ClusterState newState = step.performAction(indexMetaData.getIndex(), clusterState);
        long actualRolloverTime = LifecycleExecutionState
            .fromIndexMetadata(newState.metaData().index(indexMetaData.getIndex()))
            .getLifecycleDate();
        assertThat(actualRolloverTime, equalTo(rolloverTime));
    }

    /**
     * If the index has the rollover alias setting but no {@link RolloverInfo}
     * yet, the step must fail with a "has not rolled over yet" error.
     */
    public void testPerformActionBeforeRolloverHappened() {
        String alias = randomAlphaOfLength(3);
        long creationDate = randomLongBetween(0, 1000000);
        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(11))
            .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
            .creationDate(creationDate).putAlias(AliasMetaData.builder(alias)).numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(0, 5)).build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
            .metaData(MetaData.builder().put(indexMetaData, false)).build();
        UpdateRolloverLifecycleDateStep step = createRandomInstance();

        IllegalStateException exceptionThrown = expectThrows(IllegalStateException.class,
            () -> step.performAction(indexMetaData.getIndex(), clusterState));
        assertThat(exceptionThrown.getMessage(),
            equalTo("index [" + indexMetaData.getIndex().getName() + "] has not rolled over yet"));
    }

    /**
     * If the rollover alias setting is absent altogether, the step must fail
     * with an error naming the missing setting.
     */
    public void testPerformActionWithNoRolloverAliasSetting() {
        long creationDate = randomLongBetween(0, 1000000);
        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(11))
            .settings(settings(Version.CURRENT)).creationDate(creationDate).numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(0, 5)).build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
            .metaData(MetaData.builder().put(indexMetaData, false)).build();
        UpdateRolloverLifecycleDateStep step = createRandomInstance();

        IllegalStateException exceptionThrown = expectThrows(IllegalStateException.class,
            () -> step.performAction(indexMetaData.getIndex(), clusterState));
        assertThat(exceptionThrown.getMessage(),
            equalTo("setting [index.lifecycle.rollover_alias] is not set on index [" + indexMetaData.getIndex().getName() +"]"));
    }
}
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.hamcrest.Matchers.equalTo; + +public class UpdateSettingsStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public UpdateSettingsStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + Settings settings = Settings.builder().put(randomAlphaOfLength(10), randomAlphaOfLength(10)).build(); + + return new UpdateSettingsStep(stepKey, nextStepKey, client, settings); + } + + @Override + public UpdateSettingsStep mutateInstance(UpdateSettingsStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + Settings settings = instance.getSettings(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + settings = Settings.builder().put(settings).put(randomAlphaOfLength(10), 
randomInt()).build(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new UpdateSettingsStep(key, nextKey, client, settings); + } + + @Override + public UpdateSettingsStep copyInstance(UpdateSettingsStep instance) { + return new UpdateSettingsStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getSettings()); + } + + public void testPerformAction() throws Exception { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + UpdateSettingsStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertThat(request.settings(), equalTo(step.getSettings())); + assertThat(request.indices(), equalTo(new String[] {indexMetaData.getIndex().getName()})); + listener.onResponse(new AcknowledgedResponse(true)); + return null; + } + + }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, actionCompleted.get()); + + Mockito.verify(client, 
Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } + + public void testPerformActionFailure() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + UpdateSettingsStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertThat(request.settings(), equalTo(step.getSettings())); + assertThat(request.indices(), equalTo(new String[] {indexMetaData.getIndex().getName()})); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java new file mode 100644 index 0000000000000..c864bd76eeac5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Request; + +public class DeleteLifecycleRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request mutateInstance(Request request) { + return new Request(request.getPolicyName() + randomAlphaOfLengthBetween(1, 10)); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java new file mode 100644 index 0000000000000..b671f72cf99c4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Response; + +public class DeleteLifecycleResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java new file mode 100644 index 0000000000000..49caa0b48894e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Request;

import java.util.Arrays;

/**
 * Serialization round-trip tests for {@link Request} (get-lifecycle).
 */
public class GetLifecycleRequestTests extends AbstractStreamableTestCase<Request> {

    /** Builds a request for a single random policy name. */
    @Override
    protected Request createTestInstance() {
        return new Request(randomAlphaOfLengthBetween(1, 20));
    }

    @Override
    protected Request createBlankInstance() {
        return new Request();
    }

    /** Produces an unequal copy by appending one more random policy name. */
    @Override
    protected Request mutateInstance(Request request) {
        String[] current = request.getPolicyNames();
        String[] extended = Arrays.copyOf(current, current.length + 1);
        extended[current.length] = randomAlphaOfLength(5);
        return new Request(extended);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType;
import org.elasticsearch.xpack.core.indexlifecycle.MockAction;
import org.elasticsearch.xpack.core.indexlifecycle.TestLifecycleType;
import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.LifecyclePolicyResponseItem;
import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Response;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests.randomTestLifecyclePolicy;

/**
 * Serialization round-trip tests for {@link Response} (get-lifecycle).
 */
public class GetLifecycleResponseTests extends AbstractStreamableTestCase<Response> {

    /** Builds a response holding 0-2 random policy items. */
    @Override
    protected Response createTestInstance() {
        String randomPrefix = randomAlphaOfLength(5);
        // BUG FIX: draw the item count once. The original used
        // randomIntBetween(0, 2) directly as the loop bound, re-randomizing
        // the bound on every iteration check.
        int numItems = randomIntBetween(0, 2);
        List<LifecyclePolicyResponseItem> responseItems = new ArrayList<>(numItems);
        for (int i = 0; i < numItems; i++) {
            responseItems.add(new LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomPrefix + i),
                randomNonNegativeLong(), randomAlphaOfLength(8)));
        }
        return new Response(responseItems);
    }

    @Override
    protected Response createBlankInstance() {
        return new Response();
    }

    // Registers the mock action/lifecycle-type writeables used by the random policies.
    @Override
    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        return new NamedWriteableRegistry(
            Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new),
                new NamedWriteableRegistry.Entry(LifecycleType.class, TestLifecycleType.TYPE, in -> TestLifecycleType.INSTANCE)));
    }

    /** Mutates by adding or removing one policy item. */
    @Override
    protected Response mutateInstance(Response response) {
        List<LifecyclePolicyResponseItem> responseItems = new ArrayList<>(response.getPolicies());
        if (responseItems.size() > 0) {
            if (randomBoolean()) {
                responseItems.add(new LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomAlphaOfLength(5)),
                    randomNonNegativeLong(), randomAlphaOfLength(4)));
            } else {
                responseItems.remove(0);
            }
        } else {
            responseItems.add(new LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomAlphaOfLength(2)),
                randomNonNegativeLong(), randomAlphaOfLength(4)));
        }
        return new Response(responseItems);
    }
}
+ * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.StepKeyTests; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request; +import org.junit.Before; + +public class MoveToStepRequestTests extends AbstractStreamableXContentTestCase { + + private String index; + private static final StepKeyTests stepKeyTests = new StepKeyTests(); + + @Before + public void setup() { + index = randomAlphaOfLength(5); + } + + @Override + protected Request createTestInstance() { + return new Request(index, stepKeyTests.createTestInstance(), stepKeyTests.createTestInstance()); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(index, parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request mutateInstance(Request request) { + String index = request.getIndex(); + StepKey currentStepKey = request.getCurrentStepKey(); + StepKey nextStepKey = request.getNextStepKey(); + + switch (between(0, 2)) { + case 0: + index += randomAlphaOfLength(5); + break; + case 1: + currentStepKey = stepKeyTests.mutateInstance(currentStepKey); + break; + case 2: + nextStepKey = stepKeyTests.mutateInstance(nextStepKey); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new Request(index, currentStepKey, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java new file mode 
100644 index 0000000000000..7f9bacd35ac25 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Response; + +public class MoveToStepResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java new file mode 100644 index 0000000000000..5df60a7333143 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.indexlifecycle.action;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction;
import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction;
import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType;
import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction;
import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction;
import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction;
import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType;
import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Request;
import org.junit.Before;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Serialization and XContent round-trip tests for {@link Request} (put-lifecycle).
 */
public class PutLifecycleRequestTests extends AbstractStreamableXContentTestCase<Request> {

    private String lifecycleName;

    @Before
    public void setup() {
        lifecycleName = randomAlphaOfLength(20);
    }

    @Override
    protected Request createTestInstance() {
        return new Request(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName));
    }

    @Override
    protected Request createBlankInstance() {
        return new Request();
    }

    @Override
    protected Request doParseInstance(XContentParser parser) {
        return PutLifecycleAction.Request.parseRequest(lifecycleName, parser);
    }

    // Registers the timeseries lifecycle type and all of its actions so random
    // policies can be (de)serialized over the wire.
    @Override
    protected NamedWriteableRegistry getNamedWriteableRegistry() {
        return new NamedWriteableRegistry(
            Arrays.asList(
                new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE,
                    (in) -> TimeseriesLifecycleType.INSTANCE),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new),
                new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new)
            ));
    }

    // Same registrations for the XContent side of the round trip.
    @Override
    protected NamedXContentRegistry xContentRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.addAll(Arrays.asList(
            new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE),
                (p) -> TimeseriesLifecycleType.INSTANCE),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse)
        ));
        return new NamedXContentRegistry(entries);
    }

    // FIX: was missing @Override; this overrides the base-class hook.
    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }

    /** Mutates by swapping in a different random policy (possibly renamed). */
    @Override
    protected Request mutateInstance(Request request) {
        String name = randomBoolean() ? lifecycleName : randomAlphaOfLength(5);
        LifecyclePolicy policy = randomValueOtherThan(request.getPolicy(),
            () -> LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(name));
        return new Request(policy);
    }

}
+ */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Response; + +public class PutLifecycleResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..3f36820422614 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Request; + +import java.io.IOException; +import java.util.Arrays; + +public class RemoveIndexLifecyclePolicyRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(generateRandomStringArray(20, 20, false)); + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false)); + } + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request mutateInstance(Request instance) throws IOException { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 20, false)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + Request newRequest = new Request(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + public void testNullIndices() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new Request((String[]) null)); + assertEquals("indices cannot 
be null", exception.getMessage()); + } + + public void testValidate() { + Request request = createTestInstance(); + assertNull(request.validate()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..a394e593e7307 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Response; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class RemoveIndexLifecyclePolicyResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response createTestInstance() { + List failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + return new Response(failedIndexes); + } + + @Override + protected Response mutateInstance(Response instance) throws IOException { + List failedIndices = randomValueOtherThan(instance.getFailedIndexes(), + () -> Arrays.asList(generateRandomStringArray(20, 20, false))); + return new Response(failedIndices); + } + + @Override + protected 
Response doParseInstance(XContentParser parser) throws IOException { + return Response.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testNullFailedIndices() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new Response((List) null)); + assertEquals("failed_indexes cannot be null", exception.getMessage()); + } + + public void testHasFailures() { + Response response = new Response(new ArrayList<>()); + assertFalse(response.hasFailures()); + assertEquals(Collections.emptyList(), response.getFailedIndexes()); + + int size = randomIntBetween(1, 10); + List failedIndexes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + failedIndexes.add(randomAlphaOfLength(20)); + } + response = new Response(failedIndexes); + assertTrue(response.hasFailures()); + assertEquals(failedIndexes, response.getFailedIndexes()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java new file mode 100644 index 0000000000000..12ae3dede8a95 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request; + +import java.io.IOException; +import java.util.Arrays; + +public class RetryRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + @Override + protected Request mutateInstance(Request instance) throws IOException { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + Request newRequest = new Request(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryResponseTests.java new 
file mode 100644 index 0000000000000..24e758f9503c1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response; + +public class RetryResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle new file mode 100644 index 0000000000000..71def8937817c --- /dev/null +++ b/x-pack/plugin/ilm/build.gradle @@ -0,0 +1,34 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'x-pack-ilm' + description 'Elasticsearch Expanded Pack Plugin - Index Lifecycle Management' + classname 'org.elasticsearch.xpack.indexlifecycle.IndexLifecycle' + extendedPlugins = ['x-pack-core'] + hasNativeController false + requiresKeystore true +} +archivesBaseName = 'x-pack-ilm' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +// add all sub-projects of the qa sub-project 
+gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + +integTest.enabled = false + +run { + plugin xpackModule('core') +} diff --git a/x-pack/plugin/ilm/qa/build.gradle b/x-pack/plugin/ilm/qa/build.gradle new file mode 100644 index 0000000000000..90a6371ea907f --- /dev/null +++ b/x-pack/plugin/ilm/qa/build.gradle @@ -0,0 +1,32 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + compile project(':test:framework') +} + +subprojects { + project.tasks.withType(RestIntegTestTask) { + final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xPackResources) { + include 'rest-api-spec/api/**' + } + } +} + +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. 
*/ +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + } +} + +// the qa modules does not have any source files +licenseHeaders.enabled = false diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle new file mode 100644 index 0000000000000..edd7f3aad472e --- /dev/null +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -0,0 +1,20 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') +} + +integTestCluster { + numNodes = 4 + clusterName = 'ilm' + + setting 'xpack.ilm.enabled', 'true' + setting 'xpack.security.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'indices.lifecycle.poll_interval', '1000ms' + +} diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java new file mode 100644 index 0000000000000..c0775c9e51890 --- /dev/null +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverStep; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class ChangePolicyforIndexIT extends ESRestTestCase { + + /** + * This test aims to prove that an index will finish the current phase on an + * existing definition when the policy is changed for that index, and that + * after completing the current phase the new policy will be used for + * subsequent phases. + * + * The test creates two policies, one with a hot phase requiring 1 document + * to rollover and a warm phase with an impossible allocation action. The + * second policy has a rollover action requiring 1000 document and a warm + * phase that moves the index to known nodes that will succeed. 
An index is + * created with the fiorst policy set and the test ensures the policy is in + * the rollover step. It then changes the policy for the index to the second + * policy. It indexes a single document and checks that the index moves past + * the hot phase and through the warm phasee (proving the hot phase + * definition from the first policy was used) and then checks the allocation + * settings from the second policy are set ont he index (proving the second + * policy was used for the warm phase) + */ + public void testChangePolicyForIndex() throws Exception { + String indexName = "test-000001"; + // create policy_1 and policy_2 + Map phases1 = new HashMap<>(); + phases1.put("hot", new Phase("hot", TimeValue.ZERO, singletonMap(RolloverAction.NAME, new RolloverAction(null, null, 1L)))); + phases1.put("warm", new Phase("warm", TimeValue.ZERO, + singletonMap(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "foobarbaz"), null, null)))); + LifecyclePolicy lifecyclePolicy1 = new LifecyclePolicy("policy_1", phases1); + Map phases2 = new HashMap<>(); + phases2.put("hot", new Phase("hot", TimeValue.ZERO, singletonMap(RolloverAction.NAME, new RolloverAction(null, null, 1000L)))); + phases2.put("warm", new Phase("warm", TimeValue.ZERO, + singletonMap(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "node-1,node-2"), null, null)))); + LifecyclePolicy lifecyclePolicy2 = new LifecyclePolicy("policy_1", phases2); + // PUT policy_1 and policy_2 + XContentBuilder builder1 = jsonBuilder(); + lifecyclePolicy1.toXContent(builder1, null); + final StringEntity entity1 = new StringEntity("{ \"policy\":" + Strings.toString(builder1) + "}", ContentType.APPLICATION_JSON); + Request request1 = new Request("PUT", "_ilm/policy/" + "policy_1"); + request1.setEntity(entity1); + assertOK(client().performRequest(request1)); + XContentBuilder builder2 = jsonBuilder(); + lifecyclePolicy2.toXContent(builder2, null); + final StringEntity entity2 = new 
StringEntity("{ \"policy\":" + Strings.toString(builder2) + "}", ContentType.APPLICATION_JSON); + Request request2 = new Request("PUT", "_ilm/policy/" + "policy_2"); + request2.setEntity(entity2); + assertOK(client().performRequest(request2)); + + // create the test-index index and set the policy to policy_1 + Settings settings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias").put(LifecycleSettings.LIFECYCLE_NAME, "policy_1").build(); + Request createIndexRequest = new Request("PUT", "/" + indexName); + createIndexRequest.setJsonEntity( + "{\n \"settings\": " + Strings.toString(settings) + ", \"aliases\" : { \"alias\": { \"is_write_index\": true } } }"); + client().performRequest(createIndexRequest); + // wait for the shards to initialize + ensureGreen(indexName); + + // Check the index is on the attempt rollover step + assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME))); + + // Change the policy to policy_2 + Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings"); + final StringEntity changePolicyEntity = new StringEntity("{ \"index.lifecycle.name\": \"policy_2\" }", + ContentType.APPLICATION_JSON); + changePolicyRequest.setEntity(changePolicyEntity); + assertOK(client().performRequest(changePolicyRequest)); + + // Check the index is still on the attempt rollover step + assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME))); + + // Index a single document + XContentBuilder document = jsonBuilder().startObject(); + document.field("foo", "bar"); + document.endObject(); + final Request request = new Request("POST", "/" + indexName + "/_doc/1"); + request.setJsonEntity(Strings.toString(document)); + assertOK(client().performRequest(request)); + + // Check the index goes 
to the warm phase and completes + assertBusy(() -> assertStep(indexName, TerminalPolicyStep.KEY)); + + // Check index is allocated on node-1 and node-2 as per policy_2 + Request getSettingsRequest = new Request("GET", "/" + indexName + "/_settings"); + Response getSettingsResponse = client().performRequest(getSettingsRequest); + assertOK(getSettingsResponse); + Map getSettingsResponseMap = entityAsMap(getSettingsResponse); + @SuppressWarnings("unchecked") + Map indexSettings = (Map) ((Map) getSettingsResponseMap.get(indexName)) + .get("settings"); + @SuppressWarnings("unchecked") + Map routingSettings = (Map) ((Map) indexSettings.get("index")).get("routing"); + @SuppressWarnings("unchecked") + String includesAllocation = (String) ((Map) ((Map) routingSettings.get("allocation")) + .get("include")).get("_name"); + assertEquals("node-1,node-2", includesAllocation); + } + + private void assertStep(String indexName, StepKey expectedStep) throws IOException { + Response explainResponse = client().performRequest(new Request("GET", "/" + indexName + "/_ilm/explain")); + assertOK(explainResponse); + Map explainResponseMap = entityAsMap(explainResponse); + @SuppressWarnings("unchecked") + Map indexExplainResponse = (Map) ((Map) explainResponseMap.get("indices")) + .get(indexName); + assertEquals(expectedStep.getPhase(), indexExplainResponse.get("phase")); + assertEquals(expectedStep.getAction(), indexExplainResponse.get("action")); + assertEquals(expectedStep.getName(), indexExplainResponse.get("step")); + } +} diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java new file mode 100644 index 0000000000000..352f52bd1aa7a --- /dev/null +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -0,0 +1,463 @@ +/* + 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +import 
static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.not; + +public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { + private String index; + private String policy; + + @Before + public void refreshIndex() { + index = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + policy = randomAlphaOfLength(5); + } + + public static void updatePolicy(String indexName, String policy) throws IOException { + + Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings"); + final StringEntity changePolicyEntity = new StringEntity("{ \"index.lifecycle.name\": \"" + policy + "\" }", + ContentType.APPLICATION_JSON); + changePolicyRequest.setEntity(changePolicyEntity); + assertOK(client().performRequest(changePolicyRequest)); + } + + public void testFullPolicy() throws Exception { + String originalIndex = index + "-000001"; + String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createFullPolicy(TimeValue.ZERO); + // update policy on index + updatePolicy(originalIndex, policy); + // index document {"foo": "bar"} to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + + /* + * These asserts are in the order that they should be satisfied in, in + * order to maximize the time for all operations to complete. + * An "out of order" assert here may result in this test occasionally + * timing out and failing inappropriately. 
+ */ + // asserts that rollover was called + assertBusy(() -> assertTrue(indexExists(secondIndex))); + // asserts that shrink deleted the original index + assertBusy(() -> assertFalse(indexExists(originalIndex))); + // asserts that the delete phase completed for the managed shrunken index + assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); + } + + public void testMoveToAllocateStep() throws Exception { + String originalIndex = index + "-000001"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createFullPolicy(TimeValue.timeValueHours(10)); + // update policy on index + updatePolicy(originalIndex, policy); + + // move to a step + Request moveToStepRequest = new Request("POST", "_ilm/move/" + originalIndex); + assertBusy(() -> assertTrue(getStepKeyForIndex(originalIndex).equals(new StepKey("new", "complete", "complete")))); + moveToStepRequest.setJsonEntity("{\n" + + " \"current_step\": {\n" + + " \"phase\": \"new\",\n" + + " \"action\": \"complete\",\n" + + " \"name\": \"complete\"\n" + + " },\n" + + " \"next_step\": {\n" + + " \"phase\": \"cold\",\n" + + " \"action\": \"allocate\",\n" + + " \"name\": \"allocate\"\n" + + " }\n" + + "}"); + client().performRequest(moveToStepRequest); + assertBusy(() -> assertFalse(indexExists(originalIndex))); + } + + + public void testMoveToRolloverStep() throws Exception { + String originalIndex = index + "-000001"; + String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + 
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + createFullPolicy(TimeValue.timeValueHours(10)); + // update policy on index + updatePolicy(originalIndex, policy); + + // move to a step + Request moveToStepRequest = new Request("POST", "_ilm/move/" + originalIndex); + // index document to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + logger.info(getStepKeyForIndex(originalIndex)); + moveToStepRequest.setJsonEntity("{\n" + + " \"current_step\": {\n" + + " \"phase\": \"new\",\n" + + " \"action\": \"complete\",\n" + + " \"name\": \"complete\"\n" + + " },\n" + + " \"next_step\": {\n" + + " \"phase\": \"hot\",\n" + + " \"action\": \"rollover\",\n" + + " \"name\": \"attempt_rollover\"\n" + + " }\n" + + "}"); + client().performRequest(moveToStepRequest); + + /* + * These asserts are in the order that they should be satisfied in, in + * order to maximize the time for all operations to complete. + * An "out of order" assert here may result in this test occasionally + * timing out and failing inappropriately. 
+ */ + // asserts that rollover was called + assertBusy(() -> assertTrue(indexExists(secondIndex))); + // asserts that shrink deleted the original index + assertBusy(() -> assertFalse(indexExists(originalIndex))); + // asserts that the delete phase completed for the managed shrunken index + assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); + } + + public void testRolloverAction() throws Exception { + String originalIndex = index + "-000001"; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + // update policy on index + updatePolicy(originalIndex, policy); + // index document {"foo": "bar"} to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + assertBusy(() -> assertTrue(indexExists(secondIndex))); + assertBusy(() -> assertTrue(indexExists(originalIndex))); + } + + public void testAllocateOnlyAllocation() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + String allocateNodeName = "node-" + randomFrom(0, 1); + AllocateAction allocateAction = new AllocateAction(null, null, null, singletonMap("_name", allocateNodeName)); + createNewSingletonPolicy(randomFrom("warm", "cold"), allocateAction); + updatePolicy(index, policy); + assertBusy(() -> { + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + }); + ensureGreen(index); + } + + public void testAllocateActionOnlyReplicas() throws Exception { + int numShards = randomFrom(1, 5); + int numReplicas = randomFrom(0, 1); + int finalNumReplicas = (numReplicas + 1) % 2; + createIndexWithSettings(index, 
Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)); + AllocateAction allocateAction = new AllocateAction(finalNumReplicas, null, null, null); + createNewSingletonPolicy(randomFrom("warm", "cold"), allocateAction); + updatePolicy(index, policy); + assertBusy(() -> { + Map settings = getOnlyIndexSettings(index); + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey()), equalTo(String.valueOf(finalNumReplicas))); + }); + } + + public void testDelete() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("delete", new DeleteAction()); + updatePolicy(index, policy); + assertBusy(() -> assertFalse(indexExists(index))); + } + + public void testDeleteOnlyShouldNotMakeIndexReadonly() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("delete", new DeleteAction(), TimeValue.timeValueHours(1)); + updatePolicy(index, policy); + assertBusy(() -> { + assertThat(getStepKeyForIndex(index).getAction(), equalTo("complete")); + Map settings = getOnlyIndexSettings(index); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), not("true")); + }); + indexDocument(); + } + + public void testReadOnly() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("warm", new ReadOnlyAction()); + updatePolicy(index, policy); + assertBusy(() -> { + Map settings = getOnlyIndexSettings(index); + assertThat(getStepKeyForIndex(index), 
equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + } + + @SuppressWarnings("unchecked") + public void testForceMergeAction() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + for (int i = 0; i < randomIntBetween(2, 10); i++) { + Request request = new Request("PUT", index + "/_doc/" + i); + request.addParameter("refresh", "true"); + request.setEntity(new StringEntity("{\"a\": \"test\"}", ContentType.APPLICATION_JSON)); + client().performRequest(request); + } + + Supplier numSegments = () -> { + try { + Map segmentResponse = getAsMap(index + "/_segments"); + segmentResponse = (Map) segmentResponse.get("indices"); + segmentResponse = (Map) segmentResponse.get(index); + segmentResponse = (Map) segmentResponse.get("shards"); + List> shards = (List>) segmentResponse.get("0"); + return (Integer) shards.get(0).get("num_search_segments"); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + assertThat(numSegments.get(), greaterThan(1)); + + createNewSingletonPolicy("warm", new ForceMergeAction(1)); + updatePolicy(index, policy); + + assertBusy(() -> { + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + Map settings = getOnlyIndexSettings(index); + assertThat(numSegments.get(), equalTo(1)); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + expectThrows(ResponseException.class, this::indexDocument); + } + + public void testShrinkAction() throws Exception { + int numShards = 6; + int divisor = randomFrom(2, 3, 6); + int expectedFinalShards = numShards / divisor; + String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index; + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("warm", new ShrinkAction(expectedFinalShards)); + updatePolicy(index, policy); + assertBusy(() -> { + assertTrue(indexExists(shrunkenIndex)); + assertTrue(aliasExists(shrunkenIndex, index)); + Map settings = getOnlyIndexSettings(shrunkenIndex); + assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards))); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + expectThrows(ResponseException.class, this::indexDocument); + } + + @SuppressWarnings("unchecked") + public void testNonexistentPolicy() throws Exception { + String indexPrefix = randomAlphaOfLengthBetween(5,15).toLowerCase(Locale.ROOT); + final StringEntity template = new StringEntity("{\n" + + " \"index_patterns\": \"" + indexPrefix + "*\",\n" + + " \"settings\": {\n" + + " \"index\": {\n" + + " \"lifecycle\": {\n" + + " \"name\": \"does_not_exist\",\n" + + " \"rollover_alias\": \"test_alias\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); + Request templateRequest = new Request("PUT", "_template/test"); + templateRequest.setEntity(template); + client().performRequest(templateRequest); + + policy = randomAlphaOfLengthBetween(5,20); + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + + index = indexPrefix + "-000001"; + final StringEntity putIndex = new StringEntity("{\n" + + " \"aliases\": {\n" + + " \"test_alias\": {\n" + + " \"is_write_index\": true\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); + Request putIndexRequest = new Request("PUT", index); + putIndexRequest.setEntity(putIndex); + client().performRequest(putIndexRequest); + indexDocument(); + + assertBusy(() -> { + Request explainRequest = new Request("GET", index + "/_ilm/explain"); + Response response = 
client().performRequest(explainRequest); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + logger.info(responseMap); + Map indexStatus = (Map)((Map) responseMap.get("indices")).get(index); + assertNull(indexStatus.get("phase")); + assertNull(indexStatus.get("action")); + assertNull(indexStatus.get("step")); + Map stepInfo = (Map) indexStatus.get("step_info"); + assertNotNull(stepInfo); + assertEquals("policy [does_not_exist] does not exist", stepInfo.get("reason")); + assertEquals("illegal_argument_exception", stepInfo.get("type")); + }); + + } + + private void createFullPolicy(TimeValue hotTime) throws IOException { + Map warmActions = new HashMap<>(); + warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1)); + warmActions.put(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "node-1,node-2"), null, null)); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + Map phases = new HashMap<>(); + phases.put("hot", new Phase("hot", hotTime, singletonMap(RolloverAction.NAME, + new RolloverAction(null, null, 1L)))); + phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions)); + phases.put("cold", new Phase("cold", TimeValue.ZERO, singletonMap(AllocateAction.NAME, + new AllocateAction(0, singletonMap("_name", "node-3"), null, null)))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, new DeleteAction()))); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); + // PUT policy + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + assertOK(client().performRequest(request)); + } + + private void 
createNewSingletonPolicy(String phaseName, LifecycleAction action) throws IOException { + createNewSingletonPolicy(phaseName, action, TimeValue.ZERO); + } + + private void createNewSingletonPolicy(String phaseName, LifecycleAction action, TimeValue after) throws IOException { + Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + client().performRequest(request); + } + + private void createIndexWithSettings(String index, Settings.Builder settings) throws IOException { + // create the test-index index + Request request = new Request("PUT", "/" + index); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings.build()) + + ", \"aliases\" : { \"alias\": { \"is_write_index\": true } } }"); + client().performRequest(request); + // wait for the shards to initialize + ensureGreen(index); + + } + + private static void index(RestClient client, String index, String id, Object... 
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } + + @SuppressWarnings("unchecked") + private Map getOnlyIndexSettings(String index) throws IOException { + Map response = (Map) getIndexSettings(index).get(index); + if (response == null) { + return Collections.emptyMap(); + } + return (Map) response.get("settings"); + } + + private StepKey getStepKeyForIndex(String indexName) throws IOException { + Request explainRequest = new Request("GET", indexName + "/_ilm/explain"); + Response response = client().performRequest(explainRequest); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + @SuppressWarnings("unchecked") Map indexResponse = ((Map>) responseMap.get("indices")) + .get(indexName); + if (indexResponse == null) { + return new StepKey(null, null, null); + } + String phase = indexResponse.get("phase"); + String action = indexResponse.get("action"); + String step = indexResponse.get("step"); + return new StepKey(phase, action, step); + } + + private void indexDocument() throws IOException { + Request indexRequest = new Request("POST", index + "/_doc"); + indexRequest.setEntity(new StringEntity("{\"a\": \"test\"}", ContentType.APPLICATION_JSON)); + Response response = client().performRequest(indexRequest); + logger.info(response.getStatusLine()); + } +} diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle new file mode 100644 index 0000000000000..0f1e277e70d26 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -0,0 +1,45 @@ +import 
org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ilm'), configuration: 'runtime') +} + +task restTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +def clusterCredentials = [username: System.getProperty('tests.rest.cluster.username', 'test_admin'), + password: System.getProperty('tests.rest.cluster.password', 'x-pack-test-password')] + + +restTestRunner { + systemProperty 'tests.rest.cluster.username', clusterCredentials.username + systemProperty 'tests.rest.cluster.password', clusterCredentials.password +} + +restTestCluster { + distribution 'zip' + setting 'xpack.ilm.enabled', 'true' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setup-admin-user', + 'bin/elasticsearch-users', 'useradd', clusterCredentials.username, '-p', clusterCredentials.password, '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: clusterCredentials.username, + password: clusterCredentials.password, + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +check.dependsOn restTest +test.enabled = false diff --git a/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java new file mode 100644 index 0000000000000..f784e2b940bfe --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import java.util.Objects; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) // as default timeout seems not enough on the jenkins VMs +public class IndexLifecycleRestIT extends ESClientYamlSuiteTestCase { + + private static final String USER = Objects.requireNonNull(System.getProperty("tests.rest.cluster.username")); + private static final String PASS = Objects.requireNonNull(System.getProperty("tests.rest.cluster.password")); + + public IndexLifecycleRestIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(super.restClientSettings()) + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git 
a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml new file mode 100644 index 0000000000000..385430c1bf704 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml @@ -0,0 +1,218 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Basic Policy CRUD": + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 1 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + +--- +"Test Policy Update": + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 1 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { 
my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "300s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "600s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 2 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "300s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "600s" } + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + +--- +"Test Undeletable Policy In Use": + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + - do: + indices.create: + index: my_timeseries_index + body: + settings: + index.lifecycle.name: "my_timeseries_lifecycle" + + - do: + catch: bad_request + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { error.root_cause.0.type: 
"illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Cannot delete policy [my_timeseries_lifecycle]. It is being used by at least one index [my_timeseries_index]" } + + - do: + ilm.remove_policy: + index: my_timeseries_index + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml new file mode 100644 index 0000000000000..57223188d655b --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml @@ -0,0 +1,179 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + +--- +"Test Basic Move To Step": + + - do: + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "new" + action: "complete" + name: "complete" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { 
indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.step: "forcemerge" } + - match: { indices.my_index.action: "forcemerge" } + - match: { indices.my_index.phase: "warm" } + +--- +"Test Invalid Move To Step With Incorrect Current Step": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [my_index] is not on current step [{\"phase\":\"warm\",\"action\":\"forcemerge\",\"name\":\"forcemerge\"}]" } + + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + +--- +"Test Invalid Move To Step With Invalid Next Step": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "new" + action: "complete" + name: "complete" + next_step: + phase: "invalid" + action: "invalid" + name: "invalid" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "step [{\"phase\":\"invalid\",\"action\":\"invalid\",\"name\":\"invalid\"}] for index [my_index] with policy [my_moveable_timeseries_lifecycle] does not exist" } + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + +--- +"Test Invalid Move To Step With Invalid Policy": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index_no_policy" + body: + current_step: + 
phase: "hot" + action: "pre-pre-readonly" + name: "after" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [my_index_no_policy] is not associated with an Index Lifecycle Policy" } + +--- +"Test Invalid Move To Step With Invalid Index": + + - do: + catch: bad_request + ilm.move_to_step: + index: "does_not_exist" + body: + current_step: + phase: "hot" + action: "pre-pre-readonly" + name: "after" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [does_not_exist] does not exist" } diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml new file mode 100644 index 0000000000000..c6bdfb2a05e14 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml @@ -0,0 +1,101 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + + - do: + ilm.put_lifecycle: + policy: "my_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_lifecycle" + +--- +teardown: + + - do: + indices.delete: + index: my_index + + - do: + ilm.delete_lifecycle: + policy: "my_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_lifecycle" + +--- +"Test Invalid Retry With Non-errored Policy": + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_lifecycle" + + - do: + catch: bad_request + ilm.retry: + index: "my_index" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - 
match: { error.root_cause.0.reason: "cannot retry an action for an index [my_index] that has not encountered an error when running a Lifecycle Policy" } + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_lifecycle" } + - match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + + +--- +"Test Invalid Retry With No Policy": + + - do: + indices.create: + index: my_index + + - do: + catch: bad_request + ilm.retry: + index: "my_index" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "cannot retry an action for an index [my_index] that has not encountered an error when running a Lifecycle Policy" } + +--- +"Test Invalid Re-run With Invalid Index": + - do: + indices.create: + index: my_index + + - do: + catch: bad_request + ilm.retry: + index: "does_not_exist" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [does_not_exist] does not exist" } diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml new file mode 100644 index 0000000000000..ab6f88821d003 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml @@ -0,0 +1,263 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + 
index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: another_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: unmanaged_index + body: + settings: {} + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + - do: + indices.delete: + index: another_index + - do: + indices.delete: + index: unmanaged_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + +--- +"Test Basic Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "my_index" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: indices.my_index.phase_time + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_false: indices.my_index2 + - is_false: indices.another_index + - is_false: indices.unmanaged_index + +--- +"Test Wildcard Index Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "my_*" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: 
indices.my_index.phase_time + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_true: indices.my_index2.managed + - match: { indices.my_index2.index: "my_index2" } + - match: { indices.my_index2.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index2.phase: "new" } + - match: { indices.my_index2.action: "complete" } + - match: { indices.my_index2.step: "complete" } + - is_true: indices.my_index2.phase_time + - is_false: indices.my_index2.failed_step + - is_false: indices.my_index2.step_info + - is_false: indices.my_index2.phase_execution + + - is_false: indices.another_index + - is_false: indices.unmanaged_index + + +--- +"Test All Indexes Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "*" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: indices.my_index.phase_time + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_true: indices.my_index2.managed + - match: { indices.my_index2.index: "my_index2" } + - match: { indices.my_index2.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index2.phase: "new" } + - match: { indices.my_index2.action: "complete" } + - match: { indices.my_index2.step: "complete" } + - is_true: indices.my_index2.phase_time + - is_false: indices.my_index2.failed_step + - is_false: indices.my_index2.step_info + - is_false: indices.my_index2.phase_execution + + - is_true: indices.another_index.managed + - match: { indices.another_index.index: "another_index" } + - match: { indices.another_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { 
indices.another_index.phase: "new" } + - match: { indices.another_index.action: "complete" } + - match: { indices.another_index.step: "complete" } + - is_true: indices.another_index.phase_time + - is_false: indices.another_index.failed_step + - is_false: indices.another_index.step_info + - is_false: indices.another_index.phase_execution + + - match: { indices.unmanaged_index.index: "unmanaged_index" } + - is_false: indices.unmanaged_index.managed + - is_false: indices.unmanaged_index.policy + - is_false: indices.unmanaged_index.phase + - is_false: indices.unmanaged_index.action + - is_false: indices.unmanaged_index.step + - is_false: indices.another_index.failed_step + - is_false: indices.another_index.step_info + +--- +"Test Unmanaged Index Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "unmanaged_index" + + - match: { indices.unmanaged_index.index: "unmanaged_index" } + - is_false: indices.unmanaged_index.managed + - is_false: indices.unmanaged_index.policy + - is_false: indices.unmanaged_index.phase + - is_false: indices.unmanaged_index.action + - is_false: indices.unmanaged_index.step + - is_false: indices.unmanaged_index.phase_execution + - is_false: indices.another_index.failed_step + - is_false: indices.another_index.step_info + - is_false: indices.my_index + - is_false: indices.my_index2 + - is_false: indices.another_index + +--- +"Test new phase still has phase_time": + + - do: + ilm.put_lifecycle: + policy: "mypolicy" + body: | + { + "policy": { + "phases": { + "hot": { + "min_age": "1000s", + "actions": {} + }, + "warm": { + "min_age": "2000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + } + } + } + } + + - do: + indices.create: + index: foo + body: + settings: + index.lifecycle.name: "mypolicy" + + - do: + ilm.explain_lifecycle: + index: "foo" + + - is_true: indices.foo.managed + - match: { indices.foo.index: "foo" } + - match: { indices.foo.policy: "mypolicy" } + - match: { indices.foo.phase: "new" } + - 
match: { indices.foo.action: "complete" } + - match: { indices.foo.step: "complete" } + - is_true: indices.foo.phase_time + - is_false: indices.foo.failed_step + - is_false: indices.foo.step_info + - is_false: indices.foo.phase_execution diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml new file mode 100644 index 0000000000000..e8abc5b2c8d4e --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml @@ -0,0 +1,63 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Changing Operation Modes": + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.stop: {} + + - do: + ilm.get_status: {} + - match: { operation_mode: /STOPP(ED|ING)/ } + + - do: + ilm.start: {} + + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml new file mode 100644 index 0000000000000..c9537d9779733 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml @@ -0,0 +1,203 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + 
ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: another_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: unmanaged_index + body: + settings: {} + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + - do: + indices.delete: + index: another_index + - do: + indices.delete: + index: unmanaged_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + ilm.delete_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + +--- +"Test Remove Policy Single Index": + + - do: + indices.get_settings: + index: "another_index" + + - match: { 
another_index.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + + - do: + ilm.remove_policy: + index: "another_index" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "another_index" + + - is_false: another_index.settings.index.lifecycle + +--- +"Test Remove Policy Index Pattern": + + - do: + indices.get_settings: + index: "my_*" + + - match: { my_index.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + - match: { my_index2.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + + - do: + ilm.remove_policy: + index: "my_*" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "my_*" + + - is_false: my_index.settings.index.lifecycle + - is_false: my_index2.settings.index.lifecycle + +--- +"Test Remove Policy Unmanaged Index": + + - do: + indices.get_settings: + index: "unmanaged_index" + + - is_false: unmanaged_index.settings.index.lifecycle.name + + - do: + ilm.remove_policy: + index: "unmanaged_index" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "unmanaged_index" + + - is_false: unmanaged_index.settings.index.lifecycle + +--- +"Test Remove Policy Index Does Not Exist": + + - do: + catch: missing + ilm.remove_policy: + index: "doesnt_exist" diff --git a/x-pack/plugin/ilm/qa/with-security/build.gradle b/x-pack/plugin/ilm/qa/with-security/build.gradle new file mode 100644 index 0000000000000..f1b972012e7f8 --- /dev/null +++ b/x-pack/plugin/ilm/qa/with-security/build.gradle @@ -0,0 +1,43 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') +} + +def clusterCredentials = [username: System.getProperty('tests.rest.cluster.username', 'test_admin'), + password: System.getProperty('tests.rest.cluster.password', 
'x-pack-test-password')] + +integTestRunner { + systemProperty 'tests.rest.cluster.username', clusterCredentials.username + systemProperty 'tests.rest.cluster.password', clusterCredentials.password +} + +integTestCluster { + setting 'xpack.ilm.enabled', 'true' + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupIlmUser', + 'bin/elasticsearch-users', + 'useradd', "test_ilm", + '-p', 'x-pack-test-password', '-r', "ilm" + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', + 'useradd', clusterCredentials.username, + '-p', clusterCredentials.password, + '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: clusterCredentials.username, + password: clusterCredentials.password, + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/plugin/ilm/qa/with-security/roles.yml b/x-pack/plugin/ilm/qa/with-security/roles.yml new file mode 100644 index 0000000000000..baf89bea34568 --- /dev/null +++ b/x-pack/plugin/ilm/qa/with-security/roles.yml @@ -0,0 +1,11 @@ +ilm: + cluster: + - monitor + - manage + indices: + - names: [ 'ilm-*' ] + privileges: + - monitor + - manage + - read + - write \ No newline at end of file diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java new file mode 100644 index 0000000000000..01eb07bb35b4f --- /dev/null +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -0,0 
+1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class PermissionsIT extends ESRestTestCase { + + private String deletePolicy = "deletePolicy"; + private Settings indexSettingsWithPolicy; 
+ + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_ilm", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Before + public void init() throws Exception { + Request request = new Request("PUT", "/_cluster/settings"); + XContentBuilder pollIntervalEntity = JsonXContent.contentBuilder(); + pollIntervalEntity.startObject(); + pollIntervalEntity.startObject("transient"); + pollIntervalEntity.field(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s"); + pollIntervalEntity.endObject(); + pollIntervalEntity.endObject(); + request.setJsonEntity(Strings.toString(pollIntervalEntity)); + assertOK(adminClient().performRequest(request)); + indexSettingsWithPolicy = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, deletePolicy) + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createNewSingletonPolicy(deletePolicy,"delete", new DeleteAction()); + } + + /** + * Tests that a policy that simply deletes an index after 0s succeeds when an index + * with user `test_admin` is created referencing a policy created by `test_ilm` when both + * users have read/write permissions on the index. The goal is to verify that one + * does not need to be the same user who created both the policy and the index to have the + * index be properly managed by ILM. 
+ */ + public void testCanManageIndexAndPolicyDifferentUsers() throws Exception { + String index = "ilm-00001"; + createIndexAsAdmin(index, indexSettingsWithPolicy, ""); + assertBusy(() -> assertFalse(indexExists(index))); + } + + /** + * This tests the awkward behavior where an admin can have permissions to create a policy, + * but then not have permissions to operate on an index that was later associated with that policy by another + * user + */ + @SuppressWarnings("unchecked") + public void testCanManageIndexWithNoPermissions() throws Exception { + createIndexAsAdmin("not-ilm", indexSettingsWithPolicy, ""); + Request request = new Request("GET", "/not-ilm/_ilm/explain"); + // test_ilm user does not have permissions on this index + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.FORBIDDEN.getStatus())); + + assertBusy(() -> { + Response response = adminClient().performRequest(request); + assertOK(response); + try (InputStream is = response.getEntity().getContent()) { + Map mapResponse = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + Map indexExplain = (Map) ((Map) mapResponse.get("indices")).get("not-ilm"); + assertThat(indexExplain.get("managed"), equalTo(true)); + assertThat(indexExplain.get("step"), equalTo("ERROR")); + assertThat(indexExplain.get("failed_step"), equalTo("delete")); + Map stepInfo = (Map) indexExplain.get("step_info"); + assertThat(stepInfo.get("type"), equalTo("security_exception")); + assertThat(stepInfo.get("reason"), equalTo("action [indices:admin/delete] is unauthorized for user [test_ilm]")); + } + }); + } + + private void createNewSingletonPolicy(String policy, String phaseName, LifecycleAction action) throws IOException { + Phase phase = new Phase(phaseName, TimeValue.ZERO, singletonMap(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new 
LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + client().performRequest(request); + } + + private void createIndexAsAdmin(String name, Settings settings, String mapping) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings) + + ", \"mappings\" : {" + mapping + "} }"); + assertOK(adminClient().performRequest(request)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java new file mode 100644 index 0000000000000..9e5ef7b01c5f6 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; + +import java.io.IOException; +import java.util.function.LongSupplier; + +public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(ExecuteStepsUpdateTask.class); + private final String policy; + private final Index index; + private final Step startStep; + private final PolicyStepsRegistry policyStepsRegistry; + private final IndexLifecycleRunner lifecycleRunner; + private LongSupplier nowSupplier; + private Step.StepKey nextStepKey = null; + + public ExecuteStepsUpdateTask(String policy, Index index, Step startStep, PolicyStepsRegistry policyStepsRegistry, + IndexLifecycleRunner lifecycleRunner, LongSupplier nowSupplier) { + this.policy = policy; + this.index = index; + this.startStep = startStep; + this.policyStepsRegistry = policyStepsRegistry; + this.nowSupplier = nowSupplier; + this.lifecycleRunner = lifecycleRunner; + } + + String getPolicy() { + return policy; + } + + Index getIndex() { + return index; + } + + Step getStartStep() { + return startStep; + } + + Step.StepKey getNextStepKey() { + return nextStepKey; + } + + /** + * {@link Step}s for the current index and policy are executed in 
succession until the next step to be + * executed is not a {@link ClusterStateActionStep}, or not a {@link ClusterStateWaitStep}, or does not + * belong to the same phase as the executed step. All other types of steps are executed outside of this + * {@link ClusterStateUpdateTask}, so they are of no concern here. + * + * @param currentState The current state to execute the startStep with + * @return the new cluster state after cluster-state operations and step transitions are applied + * @throws IOException if any exceptions occur + */ + @Override + public ClusterState execute(final ClusterState currentState) throws IOException { + Step currentStep = startStep; + IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData == null) { + logger.debug("lifecycle for index [{}] executed but index no longer exists", index.getName()); + // This index doesn't exist any more, there's nothing to execute currently + return currentState; + } + Step registeredCurrentStep = IndexLifecycleRunner.getCurrentStep(policyStepsRegistry, policy, indexMetaData, + LifecycleExecutionState.fromIndexMetadata(indexMetaData)); + if (currentStep.equals(registeredCurrentStep)) { + ClusterState state = currentState; + // We can do cluster state steps all together until we + // either get to a step that isn't a cluster state step or a + // cluster state wait step returns not completed + while (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) { + nextStepKey = currentStep.getNextStepKey(); + if (currentStep instanceof ClusterStateActionStep) { + // cluster state action step so do the action and + // move the cluster state to the next step + logger.trace("[{}] performing cluster state action ({}) [{}], next: [{}]", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + try { + state = ((ClusterStateActionStep) currentStep).performAction(index, state); + } catch 
(Exception exception) { + return moveToErrorStep(state, currentStep.getKey(), exception); + } + if (currentStep.getNextStepKey() == null) { + return state; + } else { + state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), + currentStep.getNextStepKey(), nowSupplier); + } + } else { + // cluster state wait step so evaluate the + // condition, if the condition is met move to the + // next step, if its not met return the current + // cluster state so it can be applied and we will + // wait for the next trigger to evaluate the + // condition again + logger.trace("[{}] waiting for cluster state step condition ({}) [{}], next: [{}]", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + ClusterStateWaitStep.Result result; + try { + result = ((ClusterStateWaitStep) currentStep).isConditionMet(index, state); + } catch (Exception exception) { + return moveToErrorStep(state, currentStep.getKey(), exception); + } + if (result.isComplete()) { + logger.trace("[{}] cluster state step condition met successfully ({}) [{}], moving to next step {}", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + if (currentStep.getNextStepKey() == null) { + return state; + } else { + state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), + currentStep.getNextStepKey(), nowSupplier); + } + } else { + logger.trace("[{}] condition not met ({}) [{}], returning existing state", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey()); + // We may have executed a step and set "nextStepKey" to + // a value, but in this case, since the condition was + // not met, we can't advance any way, so don't attempt + // to run the current step + nextStepKey = null; + ToXContentObject stepInfo = result.getInfomationContext(); + if (stepInfo == null) { + return state; + } else { + return 
IndexLifecycleRunner.addStepInfoToClusterState(index, state, stepInfo); + } + } + } + // There are actions we need to take in the event a phase + // transition happens, so even if we would continue in the while + // loop, if we are about to go into a new phase, return so that + // other processing can occur + if (currentStep.getKey().getPhase().equals(currentStep.getNextStepKey().getPhase()) == false) { + return state; + } + currentStep = policyStepsRegistry.getStep(indexMetaData, currentStep.getNextStepKey()); + } + return state; + } else { + // either we are no longer the master or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState.equals(newState) == false) { + IndexMetaData indexMetaData = newState.metaData().index(index); + if (nextStepKey != null && nextStepKey != TerminalPolicyStep.KEY && indexMetaData != null) { + logger.trace("[{}] step sequence starting with {} has completed, running next step {} if it is an async action", + index.getName(), startStep.getKey(), nextStepKey); + // After the cluster state has been processed and we have moved + // to a new step, we need to conditionally execute the step iff + // it is an `AsyncAction` so that it is executed exactly once. + lifecycleRunner.maybeRunAsyncAction(newState, indexMetaData, policy, nextStepKey); + } + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException( + "policy [" + policy + "] for index [" + index.getName() + "] failed on step [" + startStep.getKey() + "].", e); + } + + private ClusterState moveToErrorStep(final ClusterState state, Step.StepKey currentStepKey, Exception cause) throws IOException { + logger.error("policy [{}] for index [{}] failed on cluster state step [{}]. 
Moving to ERROR step", policy, index.getName(), + currentStepKey); + MoveToErrorStepUpdateTask moveToErrorStepUpdateTask = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, + nowSupplier); + return moveToErrorStepUpdateTask.execute(state); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java new file mode 100644 index 0000000000000..1e42846b317d3 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import 
org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import 
org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestDeleteLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestExplainLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestGetLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestGetStatusAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestMoveToStepAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestPutLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestRetryAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestStartILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestStopAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportDeleteLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportExplainLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportGetLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportGetStatusAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportMoveToStepAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportPutLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportRetryAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportStartILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportStopILMAction; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; + +public class IndexLifecycle extends Plugin implements ActionPlugin { 
+ private final SetOnce indexLifecycleInitialisationService = new SetOnce<>(); + private Settings settings; + private boolean enabled; + private boolean transportClientMode; + + public IndexLifecycle(Settings settings) { + this.settings = settings; + this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.transportClientMode = XPackPlugin.transportClientMode(settings); + } + + // overridable by tests + protected Clock getClock() { + return Clock.systemUTC(); + } + + public Collection createGuiceModules() { + List modules = new ArrayList<>(); + + if (transportClientMode) { + return modules; + } + + modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexLifecycleFeatureSet.class)); + + return modules; + } + + @Override + public List> getSettings() { + return Arrays.asList( + LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, + LifecycleSettings.LIFECYCLE_NAME_SETTING, + RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING); + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + indexLifecycleInitialisationService + .set(new IndexLifecycleService(settings, client, clusterService, getClock(), System::currentTimeMillis, xContentRegistry)); + return Collections.singletonList(indexLifecycleInitialisationService.get()); + } + + @Override + public List getNamedWriteables() { + return Arrays.asList(); + } + + @Override + public List getNamedXContent() { + return Arrays.asList( + // Custom Metadata + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexLifecycleMetadata.TYPE), + parser -> IndexLifecycleMetadata.PARSER.parse(parser, null)), + // Lifecycle Types + new 
NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p, c) -> TimeseriesLifecycleType.INSTANCE), + // Lifecycle Actions + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse) + ); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new RestPutLifecycleAction(settings, restController), + new RestGetLifecycleAction(settings, restController), + new RestDeleteLifecycleAction(settings, restController), + new RestExplainLifecycleAction(settings, restController), + new RestRemoveIndexLifecyclePolicyAction(settings, restController), + new RestMoveToStepAction(settings, restController), + new RestRetryAction(settings, restController), + new RestStopAction(settings, restController), + new RestStartILMAction(settings, restController), + new RestGetStatusAction(settings, restController) + ); + } + + @Override + public List> getActions() { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new ActionHandler<>(PutLifecycleAction.INSTANCE, 
TransportPutLifecycleAction.class), + new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class), + new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class), + new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class), + new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class), + new ActionHandler<>(MoveToStepAction.INSTANCE, TransportMoveToStepAction.class), + new ActionHandler<>(RetryAction.INSTANCE, TransportRetryAction.class), + new ActionHandler<>(StartILMAction.INSTANCE, TransportStartILMAction.class), + new ActionHandler<>(StopILMAction.INSTANCE, TransportStopILMAction.class), + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class)); + } + + @Override + public void close() { + IndexLifecycleService lifecycleService = indexLifecycleInitialisationService.get(); + if (lifecycleService != null) { + lifecycleService.close(); + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java new file mode 100644 index 0000000000000..2469621316889 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class IndexLifecycleFeatureSet implements XPackFeatureSet { + + private final boolean enabled; + private final XPackLicenseState licenseState; + private ClusterService clusterService; + + @Inject + public IndexLifecycleFeatureSet(Settings settings, @Nullable XPackLicenseState licenseState, ClusterService clusterService) { + this.clusterService = clusterService; + this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.INDEX_LIFECYCLE; + } + + @Override + public String description() { + return "Index lifecycle management for the Elastic Stack"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isIndexLifecycleAllowed(); + } + + @Override + public boolean enabled() { + return 
enabled; + } + + @Override + public Map nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener listener) { + MetaData metaData = clusterService.state().metaData(); + IndexLifecycleMetadata lifecycleMetadata = metaData.custom(IndexLifecycleMetadata.TYPE); + if (enabled() && lifecycleMetadata != null) { + Map policyUsage = new HashMap<>(); + metaData.indices().forEach(entry -> { + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(entry.value.getSettings()); + Integer indicesManaged = policyUsage.get(policyName); + if (indicesManaged == null) { + indicesManaged = 1; + } else { + indicesManaged = indicesManaged + 1; + } + policyUsage.put(policyName, indicesManaged); + }); + List policyStats = lifecycleMetadata.getPolicies().values().stream().map(policy -> { + Map phaseStats = policy.getPhases().values().stream().map(phase -> { + String[] actionNames = phase.getActions().keySet().toArray(new String[phase.getActions().size()]); + return new Tuple(phase.getName(), new PhaseStats(phase.getMinimumAge(), actionNames)); + }).collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + return new PolicyStats(phaseStats, policyUsage.getOrDefault(policy.getName(), 0)); + }).collect(Collectors.toList()); + listener.onResponse(new IndexLifecycleFeatureSetUsage(available(), enabled(), policyStats)); + } else { + listener.onResponse(new IndexLifecycleFeatureSetUsage(available(), enabled())); + } + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java new file mode 100644 index 0000000000000..f5e33fdb98079 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -0,0 +1,510 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.indexlifecycle;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep;
import org.elasticsearch.xpack.core.indexlifecycle.AsyncWaitStep;
import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep;
import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep;
import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
import org.elasticsearch.xpack.core.indexlifecycle.Phase;
import org.elasticsearch.xpack.core.indexlifecycle.PhaseCompleteStep;
import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo;
import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction;
import org.elasticsearch.xpack.core.indexlifecycle.Step;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep;

import java.io.IOException;
import java.util.List;
import java.util.function.LongSupplier;

import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;

/**
 * Drives indices through the steps of their lifecycle policy. Three entry points exist:
 * {@link #runPeriodicStep} (scheduler-driven: async wait steps and phase transitions),
 * {@link #maybeRunAsyncAction} (async action steps after a cluster state change), and
 * {@link #runPolicyAfterStateChange} (cluster-state action/wait steps). Step transitions
 * are recorded in the index's ILM custom metadata via cluster state update tasks.
 */
public class IndexLifecycleRunner {
    private static final Logger logger = LogManager.getLogger(IndexLifecycleRunner.class);
    private PolicyStepsRegistry stepRegistry;
    private ClusterService clusterService;
    private LongSupplier nowSupplier;

    // nowSupplier is injected rather than reading the clock directly, which keeps
    // time-based phase transitions testable.
    public IndexLifecycleRunner(PolicyStepsRegistry stepRegistry, ClusterService clusterService, LongSupplier nowSupplier) {
        this.stepRegistry = stepRegistry;
        this.clusterService = clusterService;
        this.nowSupplier = nowSupplier;
    }

    /**
     * Return true or false depending on whether the index is ready to be in {@code phase}
     */
    boolean isReadyToTransitionToThisPhase(final String policy, final IndexMetaData indexMetaData, final String phase) {
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData);
        if (lifecycleState.getLifecycleDate() == null) {
            // no lifecycle date yet -> treat the index as immediately eligible
            logger.trace("no index creation date has been set yet");
            return true;
        }
        final Long lifecycleDate = lifecycleState.getLifecycleDate();
        assert lifecycleDate != null && lifecycleDate >= 0 : "expected index to have a lifecycle date but it did not";
        final TimeValue after = stepRegistry.getIndexAgeForPhase(policy, phase);
        final long now = nowSupplier.getAsLong();
        final TimeValue age = new TimeValue(now - lifecycleDate);
        if (logger.isTraceEnabled()) {
            logger.trace("[{}] checking for index age to be at least [{}] before performing actions in "
                    + "the \"{}\" phase. Now: {}, lifecycle date: {}, age: [{}/{}s]",
                indexMetaData.getIndex().getName(), after, phase,
                new TimeValue(now).seconds(),
                new TimeValue(lifecycleDate).seconds(),
                age, age.seconds());
        }
        return now >= lifecycleDate + after.getMillis();
    }

    /**
     * Run the current step, only if it is an asynchronous wait step. These
     * wait criteria are checked periodically from the ILM scheduler
     */
    public void runPeriodicStep(String policy, IndexMetaData indexMetaData) {
        String index = indexMetaData.getIndex().getName();
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData);
        Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState);
        if (currentStep == null) {
            // Either the policy was deleted out from under the index, or the recorded
            // step key no longer matches any step in the (possibly updated) policy.
            if (stepRegistry.policyExists(policy) == false) {
                markPolicyDoesNotExist(policy, indexMetaData.getIndex(), lifecycleState);
                return;
            } else {
                logger.error("current step [{}] for index [{}] with policy [{}] is not recognized",
                    getCurrentStepKey(lifecycleState), index, policy);
                return;
            }
        }

        if (currentStep instanceof TerminalPolicyStep) {
            logger.debug("policy [{}] for index [{}] complete, skipping execution", policy, index);
            return;
        } else if (currentStep instanceof ErrorStep) {
            logger.debug("policy [{}] for index [{}] on an error step, skipping execution", policy, index);
            return;
        }

        logger.trace("[{}] maybe running periodic step ({}) with current step {}",
            index, currentStep.getClass().getSimpleName(), currentStep.getKey());
        // Only phase changing and async wait steps should be run through periodic polling
        if (currentStep instanceof PhaseCompleteStep) {
            // Only proceed to the next step if enough time has elapsed to go into the next phase
            if (isReadyToTransitionToThisPhase(policy, indexMetaData, currentStep.getNextStepKey().getPhase())) {
                moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey());
            }
        } else if (currentStep instanceof AsyncWaitStep) {
            logger.debug("[{}] running periodic policy with current-step [{}]", index, currentStep.getKey());
            ((AsyncWaitStep) currentStep).evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() {

                @Override
                public void onResponse(boolean conditionMet, ToXContentObject stepInfo) {
                    logger.trace("cs-change-async-wait-callback, [{}] current-step: {}", index, currentStep.getKey());
                    if (conditionMet) {
                        moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey());
                    } else if (stepInfo != null) {
                        // condition not met: surface the step's diagnostic info to the user
                        setStepInfo(indexMetaData.getIndex(), policy, currentStep.getKey(), stepInfo);
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    moveToErrorStep(indexMetaData.getIndex(), policy, currentStep.getKey(), e);
                }
            });
        } else {
            logger.trace("[{}] ignoring non periodic step execution from step transition [{}]", index, currentStep.getKey());
        }
    }

    /**
     * If the current step (matching the expected step key) is an asynchronous action step, run it
     */
    public void maybeRunAsyncAction(ClusterState currentState, IndexMetaData indexMetaData, String policy, StepKey expectedStepKey) {
        String index = indexMetaData.getIndex().getName();
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData);
        Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState);
        if (currentStep == null) {
            logger.warn("current step [{}] for index [{}] with policy [{}] is not recognized",
                getCurrentStepKey(lifecycleState), index, policy);
            return;
        }

        logger.trace("[{}] maybe running async action step ({}) with current step {}",
            index, currentStep.getClass().getSimpleName(), currentStep.getKey());
        // Callers pass the step key they believe the index is on; a mismatch means the
        // cluster state moved underneath them, which is a programming error here.
        if (currentStep.getKey().equals(expectedStepKey) == false) {
            throw new IllegalStateException("expected index [" + indexMetaData.getIndex().getName() + "] with policy [" + policy
                + "] to have current step consistent with provided step key (" + expectedStepKey + ") but it was "
                + currentStep.getKey());
        }
        if (currentStep instanceof AsyncActionStep) {
            logger.debug("[{}] running policy with async action step [{}]", index, currentStep.getKey());
            ((AsyncActionStep) currentStep).performAction(indexMetaData, currentState, new AsyncActionStep.Listener() {

                @Override
                public void onResponse(boolean complete) {
                    logger.trace("cs-change-async-action-callback, [{}], current-step: {}", index, currentStep.getKey());
                    // indexSurvives() guards against advancing a step whose action deleted
                    // the index (e.g. the delete action).
                    if (complete && ((AsyncActionStep) currentStep).indexSurvives()) {
                        moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey());
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    moveToErrorStep(indexMetaData.getIndex(), policy, currentStep.getKey(), e);
                }
            });
        } else {
            logger.trace("[{}] ignoring non async action step execution from step transition [{}]", index, currentStep.getKey());
        }
    }

    /**
     * Run the current step that either waits for index age, or updates/waits-on cluster state.
     * Invoked after the cluster state has been changed
     */
    public void runPolicyAfterStateChange(String policy, IndexMetaData indexMetaData) {
        String index = indexMetaData.getIndex().getName();
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData);
        Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState);
        if (currentStep == null) {
            if (stepRegistry.policyExists(policy) == false) {
                markPolicyDoesNotExist(policy, indexMetaData.getIndex(), lifecycleState);
                return;
            } else {
                logger.error("current step [{}] for index [{}] with policy [{}] is not recognized",
                    getCurrentStepKey(lifecycleState), index, policy);
                return;
            }
        }

        if (currentStep instanceof TerminalPolicyStep) {
            logger.debug("policy [{}] for index [{}] complete, skipping execution", policy, index);
            return;
        } else if (currentStep instanceof ErrorStep) {
            logger.debug("policy [{}] for index [{}] on an error step, skipping execution", policy, index);
            return;
        }

        logger.trace("[{}] maybe running step ({}) after state change: {}",
            index, currentStep.getClass().getSimpleName(), currentStep.getKey());
        if (currentStep instanceof PhaseCompleteStep) {
            // Only proceed to the next step if enough time has elapsed to go into the next phase
            if (isReadyToTransitionToThisPhase(policy, indexMetaData, currentStep.getNextStepKey().getPhase())) {
                moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey());
            }
        } else if (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) {
            // Cluster-state steps run inside a state update task so their read/modify of
            // the state is atomic on the master.
            logger.debug("[{}] running policy with current-step [{}]", indexMetaData.getIndex().getName(), currentStep.getKey());
            clusterService.submitStateUpdateTask("ilm-execute-cluster-state-steps",
                new ExecuteStepsUpdateTask(policy, indexMetaData.getIndex(), currentStep, stepRegistry, this, nowSupplier));
        } else {
            logger.trace("[{}] ignoring step execution from cluster state change event [{}]", index, currentStep.getKey());
        }
    }

    /**
     * Retrieves the current {@link StepKey} from the index settings. Note that
     * it is illegal for the step to be set with the phase and/or action unset,
     * or for the step to be unset with the phase and/or action set. All three
     * settings must be either present or missing.
     *
     * @param lifecycleState the index custom data to extract the {@link StepKey} from.
     */
    public static StepKey getCurrentStepKey(LifecycleExecutionState lifecycleState) {
        String currentPhase = lifecycleState.getPhase();
        String currentAction = lifecycleState.getAction();
        String currentStep = lifecycleState.getStep();
        if (Strings.isNullOrEmpty(currentStep)) {
            assert Strings.isNullOrEmpty(currentPhase) : "Current phase is not empty: " + currentPhase;
            assert Strings.isNullOrEmpty(currentAction) : "Current action is not empty: " + currentAction;
            return null;
        } else {
            assert Strings.isNullOrEmpty(currentPhase) == false;
            assert Strings.isNullOrEmpty(currentAction) == false;
            return new StepKey(currentPhase, currentAction, currentStep);
        }
    }

    // Resolves the Step object for the index's recorded step key, falling back to the
    // policy's first step when no key has been recorded yet (fresh index).
    static Step getCurrentStep(PolicyStepsRegistry stepRegistry, String policy, IndexMetaData indexMetaData,
                               LifecycleExecutionState lifecycleState) {
        StepKey currentStepKey = getCurrentStepKey(lifecycleState);
        logger.trace("[{}] retrieved current step key: {}", indexMetaData.getIndex().getName(), currentStepKey);
        if (currentStepKey == null) {
            return stepRegistry.getFirstStep(policy);
        } else {
            return stepRegistry.getStep(indexMetaData, currentStepKey);
        }
    }

    /**
     * This method is intended for handling moving to different steps from {@link TransportAction} executions.
     * For this reason, it is reasonable to throw {@link IllegalArgumentException} when state is not as expected.
     *
     * @param indexName The index whose step is to change
     * @param currentState The current {@link ClusterState}
     * @param currentStepKey The current {@link StepKey} found for the index in the current cluster state
     * @param nextStepKey The next step to move the index into
     * @param nowSupplier The current-time supplier for updating when steps changed
     * @param stepRegistry The steps registry to check a step-key's existence in the index's current policy
     * @return The updated cluster state where the index moved to nextStepKey
     */
    static ClusterState moveClusterStateToStep(String indexName, ClusterState currentState, StepKey currentStepKey,
                                               StepKey nextStepKey, LongSupplier nowSupplier,
                                               PolicyStepsRegistry stepRegistry) {
        IndexMetaData idxMeta = currentState.getMetaData().index(indexName);
        Settings indexSettings = idxMeta.getSettings();
        String indexPolicySetting = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings);

        // policy could be updated in-between execution
        if (Strings.isNullOrEmpty(indexPolicySetting)) {
            throw new IllegalArgumentException("index [" + indexName + "] is not associated with an Index Lifecycle Policy");
        }

        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta);
        if (currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(lifecycleState)) == false) {
            throw new IllegalArgumentException("index [" + indexName + "] is not on current step [" + currentStepKey + "]");
        }

        if (stepRegistry.stepExists(indexPolicySetting, nextStepKey) == false) {
            throw new IllegalArgumentException("step [" + nextStepKey + "] for index [" + idxMeta.getIndex().getName()
                + "] with policy [" + indexPolicySetting + "] does not exist");
        }

        return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, nextStepKey, nowSupplier);
    }

    // Builds a new cluster state with the index's execution state advanced from
    // currentStep to nextStep (timestamps and phase definition updated accordingly).
    static ClusterState moveClusterStateToNextStep(Index index, ClusterState clusterState, StepKey currentStep, StepKey nextStep,
                                                   LongSupplier nowSupplier) {
        IndexMetaData idxMeta = clusterState.getMetaData().index(index);
        IndexLifecycleMetadata ilmMeta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);
        LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas()
            .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()));
        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta);
        LifecycleExecutionState newLifecycleState = moveExecutionStateToNextStep(policyMetadata,
            lifecycleState, currentStep, nextStep, nowSupplier);
        ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newLifecycleState);

        return newClusterStateBuilder.build();
    }

    // Moves the index into the synthetic ERROR step within its current phase/action,
    // recording the failed step name and the serialized cause as step info.
    static ClusterState moveClusterStateToErrorStep(Index index, ClusterState clusterState, StepKey currentStep, Exception cause,
                                                    LongSupplier nowSupplier) throws IOException {
        IndexMetaData idxMeta = clusterState.getMetaData().index(index);
        IndexLifecycleMetadata ilmMeta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);
        LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas()
            .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()));
        XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder();
        causeXContentBuilder.startObject();
        ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause);
        causeXContentBuilder.endObject();
        LifecycleExecutionState nextStepState = moveExecutionStateToNextStep(policyMetadata,
            LifecycleExecutionState.fromIndexMetadata(idxMeta), currentStep, new StepKey(currentStep.getPhase(),
                currentStep.getAction(), ErrorStep.NAME), nowSupplier);
        LifecycleExecutionState.Builder failedState = LifecycleExecutionState.builder(nextStepState);
        failedState.setFailedStep(currentStep.getName());
failedState.setStepInfo(BytesReference.bytes(causeXContentBuilder).utf8ToString()); + ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, failedState.build()); + return newClusterStateBuilder.build(); + } + + ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) { + ClusterState newState = currentState; + for (String index : indices) { + IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData == null) { + throw new IllegalArgumentException("index [" + index + "] does not exist"); + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + String failedStep = lifecycleState.getFailedStep(); + if (currentStepKey != null && ErrorStep.NAME.equals(currentStepKey.getName()) + && Strings.isNullOrEmpty(failedStep) == false) { + StepKey nextStepKey = new StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), failedStep); + newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry); + } else { + throw new IllegalArgumentException("cannot retry an action for an index [" + + index + "] that has not encountered an error when running a Lifecycle Policy"); + } + } + return newState; + } + + private static LifecycleExecutionState moveExecutionStateToNextStep(LifecyclePolicyMetadata policyMetadata, + LifecycleExecutionState existingState, + StepKey currentStep, StepKey nextStep, + LongSupplier nowSupplier) { + long nowAsMillis = nowSupplier.getAsLong(); + LifecycleExecutionState.Builder updatedState = LifecycleExecutionState.builder(existingState); + updatedState.setPhase(nextStep.getPhase()); + updatedState.setAction(nextStep.getAction()); + updatedState.setStep(nextStep.getName()); + updatedState.setStepTime(nowAsMillis); + + // clear any step info or error-related settings from 
the current step + updatedState.setFailedStep(null); + updatedState.setStepInfo(null); + + if (currentStep.getPhase().equals(nextStep.getPhase()) == false) { + final String newPhaseDefinition; + final Phase nextPhase; + if ("new".equals(nextStep.getPhase()) || TerminalPolicyStep.KEY.equals(nextStep)) { + nextPhase = null; + } else { + nextPhase = policyMetadata.getPolicy().getPhases().get(nextStep.getPhase()); + } + PhaseExecutionInfo phaseExecutionInfo = new PhaseExecutionInfo(policyMetadata.getName(), nextPhase, + policyMetadata.getVersion(), policyMetadata.getModifiedDate()); + newPhaseDefinition = Strings.toString(phaseExecutionInfo, false, false); + updatedState.setPhaseDefinition(newPhaseDefinition); + updatedState.setPhaseTime(nowAsMillis); + } else if (currentStep.getPhase().equals(InitializePolicyContextStep.INITIALIZATION_PHASE)) { + // The "new" phase is the initialization phase, usually the phase + // time would be set on phase transition, but since there is no + // transition into the "new" phase, we set it any time in the "new" + // phase + updatedState.setPhaseTime(nowAsMillis); + } + + if (currentStep.getAction().equals(nextStep.getAction()) == false) { + updatedState.setActionTime(nowAsMillis); + } + return updatedState.build(); + } + + static ClusterState.Builder newClusterStateWithLifecycleState(Index index, ClusterState clusterState, + LifecycleExecutionState lifecycleState) { + ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); + newClusterStateBuilder.metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.asMap()))); + return newClusterStateBuilder; + } + + /** + * Conditionally updates cluster state with new step info. 
The new cluster state is only + * built if the step info has changed, otherwise the same old clusterState is + * returned + * + * @param index the index to modify + * @param clusterState the cluster state to modify + * @param stepInfo the new step info to update + * @return Updated cluster state with stepInfo if changed, otherwise the same cluster state + * if no changes to step info exist + * @throws IOException if parsing step info fails + */ + static ClusterState addStepInfoToClusterState(Index index, ClusterState clusterState, ToXContentObject stepInfo) throws IOException { + IndexMetaData indexMetaData = clusterState.getMetaData().index(index); + if (indexMetaData == null) { + // This index doesn't exist anymore, we can't do anything + return clusterState; + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + final String stepInfoString; + try (XContentBuilder infoXContentBuilder = JsonXContent.contentBuilder()) { + stepInfo.toXContent(infoXContentBuilder, ToXContent.EMPTY_PARAMS); + stepInfoString = BytesReference.bytes(infoXContentBuilder).utf8ToString(); + } + if (stepInfoString.equals(lifecycleState.getStepInfo())) { + return clusterState; + } + LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(lifecycleState); + newState.setStepInfo(stepInfoString); + ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newState.build()); + return newClusterStateBuilder.build(); + } + + private void moveToStep(Index index, String policy, StepKey currentStepKey, StepKey nextStepKey) { + logger.debug("[{}] moving to step [{}] {} -> {}", index.getName(), policy, currentStepKey, nextStepKey); + clusterService.submitStateUpdateTask("ilm-move-to-step", + new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey, nowSupplier, clusterState -> + { + IndexMetaData indexMetaData = clusterState.metaData().index(index); + if (nextStepKey != null 
&& nextStepKey != TerminalPolicyStep.KEY && indexMetaData != null) { + maybeRunAsyncAction(clusterState, indexMetaData, policy, nextStepKey); + } + })); + } + + private void moveToErrorStep(Index index, String policy, StepKey currentStepKey, Exception e) { + logger.error(new ParameterizedMessage("policy [{}] for index [{}] failed on step [{}]. Moving to ERROR step", + policy, index.getName(), currentStepKey), e); + clusterService.submitStateUpdateTask("ilm-move-to-error-step", + new MoveToErrorStepUpdateTask(index, policy, currentStepKey, e, nowSupplier)); + } + + private void setStepInfo(Index index, String policy, StepKey currentStepKey, ToXContentObject stepInfo) { + clusterService.submitStateUpdateTask("ilm-set-step-info", new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo)); + } + + public static ClusterState removePolicyForIndexes(final Index[] indices, ClusterState currentState, List failedIndexes) { + MetaData.Builder newMetadata = MetaData.builder(currentState.getMetaData()); + boolean clusterStateChanged = false; + for (Index index : indices) { + IndexMetaData indexMetadata = currentState.getMetaData().index(index); + if (indexMetadata == null) { + // Index doesn't exist so fail it + failedIndexes.add(index.getName()); + } else { + IndexMetaData.Builder newIdxMetadata = IndexLifecycleRunner.removePolicyForIndex(indexMetadata); + if (newIdxMetadata != null) { + newMetadata.put(newIdxMetadata); + clusterStateChanged = true; + } + } + } + if (clusterStateChanged) { + ClusterState.Builder newClusterState = ClusterState.builder(currentState); + newClusterState.metaData(newMetadata); + return newClusterState.build(); + } else { + return currentState; + } + } + + private static IndexMetaData.Builder removePolicyForIndex(IndexMetaData indexMetadata) { + Settings idxSettings = indexMetadata.getSettings(); + Settings.Builder newSettings = Settings.builder().put(idxSettings); + boolean notChanged = true; + + notChanged &= 
Strings.isNullOrEmpty(newSettings.remove(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + notChanged &= Strings.isNullOrEmpty(newSettings.remove(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.getKey())); + long newSettingsVersion = notChanged ? indexMetadata.getSettingsVersion() : 1 + indexMetadata.getSettingsVersion(); + + IndexMetaData.Builder builder = IndexMetaData.builder(indexMetadata); + builder.removeCustom(ILM_CUSTOM_METADATA_KEY); + return builder.settings(newSettings).settingsVersion(newSettingsVersion); + } + + private void markPolicyDoesNotExist(String policyName, Index index, LifecycleExecutionState executionState) { + logger.debug("policy [{}] for index [{}] does not exist, recording this in step_info for this index", + policyName, index.getName()); + setStepInfo(index, policyName, getCurrentStepKey(executionState), + new SetStepInfoUpdateTask.ExceptionWrapper( + new IllegalArgumentException("policy [" + policyName + "] does not exist"))); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java new file mode 100644 index 0000000000000..1f4472daa2bd3 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -0,0 +1,267 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
 */
package org.elasticsearch.xpack.indexlifecycle;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.XPackField;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
import org.elasticsearch.xpack.core.scheduler.SchedulerEngine;

import java.io.Closeable;
import java.time.Clock;
import java.util.Collections;
import java.util.Set;
import java.util.function.LongSupplier;

/**
 * A service which runs the {@link LifecyclePolicy}s associated with indexes.
 *
 * Policies are executed only while this node is the elected master: the
 * {@link LocalNodeMasterListener} callbacks start/stop a periodic
 * {@link SchedulerEngine} job, and cluster-state changes additionally trigger
 * policy runs between periodic ticks.
 */
public class IndexLifecycleService extends AbstractComponent
    implements ClusterStateListener, ClusterStateApplier, SchedulerEngine.Listener, Closeable, LocalNodeMasterListener {
    private static final Logger logger = LogManager.getLogger(IndexLifecycleService.class);
    // Actions that must be allowed to finish even when an ILM stop was requested
    // (interrupting a shrink mid-flight would leave a half-built target index).
    private static final Set<String> IGNORE_ACTIONS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkAction.NAME);
    // Flipped by onMaster()/offMaster(); all policy execution is gated on it.
    private volatile boolean isMaster = false;
    private volatile TimeValue pollInterval;

    // The scheduler is created lazily on first mastership and kept for the service lifetime.
    private final SetOnce<SchedulerEngine> scheduler = new SetOnce<>();
    private final Clock clock;
    private final PolicyStepsRegistry policyRegistry;
    private final IndexLifecycleRunner lifecycleRunner;
    private final Settings settings;
    private Client client;
    private ClusterService clusterService;
    private LongSupplier nowSupplier;
    private SchedulerEngine.Job scheduledJob;

    /**
     * Registers this service as a cluster-state applier/listener and local-master
     * listener, and subscribes to dynamic updates of the poll-interval setting.
     */
    public IndexLifecycleService(Settings settings, Client client, ClusterService clusterService, Clock clock, LongSupplier nowSupplier,
                                 NamedXContentRegistry xContentRegistry) {
        super(settings);
        this.settings = settings;
        this.client = client;
        this.clusterService = clusterService;
        this.clock = clock;
        this.nowSupplier = nowSupplier;
        this.scheduledJob = null;
        this.policyRegistry = new PolicyStepsRegistry(xContentRegistry, client);
        this.lifecycleRunner = new IndexLifecycleRunner(policyRegistry, clusterService, nowSupplier);
        this.pollInterval = LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING.get(settings);
        clusterService.addStateApplier(this);
        clusterService.addListener(this);
        clusterService.addLocalNodeMasterListener(this);
        clusterService.getClusterSettings().addSettingsUpdateConsumer(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING,
            this::updatePollInterval);
    }

    /**
     * Delegates to the runner to execute an async action for the index, resolving
     * the policy name from the index's settings first.
     */
    public void maybeRunAsyncAction(ClusterState clusterState, IndexMetaData indexMetaData, StepKey nextStepKey) {
        String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetaData.getSettings());
        lifecycleRunner.maybeRunAsyncAction(clusterState, indexMetaData, policyName, nextStepKey);
    }

    /** Computes the cluster state resulting from moving the index to {@code nextStepKey}. */
    public ClusterState moveClusterStateToStep(ClusterState currentState, String indexName, StepKey currentStepKey, StepKey nextStepKey) {
        return IndexLifecycleRunner.moveClusterStateToStep(indexName, currentState, currentStepKey, nextStepKey,
            nowSupplier, policyRegistry);
    }

    /** Computes the cluster state resulting from retrying the failed step on the given indices. */
    public ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) {
        return lifecycleRunner.moveClusterStateToFailedStep(currentState, indices);
    }

    @Override
    public void onMaster() {
        this.isMaster = true;
        maybeScheduleJob();

        ClusterState clusterState = clusterService.state();
        IndexLifecycleMetadata currentMetadata = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);
        if (currentMetadata != null) {
            OperationMode currentMode = currentMetadata.getOperationMode();
            if (OperationMode.STOPPED.equals(currentMode)) {
                return;
            }

            boolean safeToStop = true; // true until proven false by a run policy

            // If we just became master, we need to kick off any async actions that
            // may have not been run due to master rollover
            for (ObjectCursor<IndexMetaData> cursor : clusterState.metaData().indices().values()) {
                IndexMetaData idxMeta = cursor.value;
                String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings());
                if (Strings.isNullOrEmpty(policyName) == false) {
                    StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta));
                    // While STOPPING, skip everything except the actions that must run to completion.
                    if (OperationMode.STOPPING == currentMode &&
                        stepKey != null &&
                        IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) {
                        logger.info("skipping policy [{}] for index [{}]. stopping Index Lifecycle execution",
                            policyName, idxMeta.getIndex().getName());
                        continue;
                    }
                    lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey);
                    safeToStop = false; // proven false!
                }
            }
            // A requested stop only becomes STOPPED once no managed index needed to run.
            if (safeToStop && OperationMode.STOPPING == currentMode) {
                submitOperationModeUpdate(OperationMode.STOPPED);
            }
        }
    }

    @Override
    public void offMaster() {
        this.isMaster = false;
        cancelJob();
    }

    @Override
    public String executorName() {
        return ThreadPool.Names.MANAGEMENT;
    }

    // Dynamic setting consumer; reschedules the periodic job with the new interval.
    private void updatePollInterval(TimeValue newInterval) {
        this.pollInterval = newInterval;
        maybeScheduleJob();
    }

    // pkg-private for testing
    SchedulerEngine getScheduler() {
        return scheduler.get();
    }

    // pkg-private for testing
    SchedulerEngine.Job getScheduledJob() {
        return scheduledJob;
    }

    // (Re-)registers the periodic trigger job; no-op unless this node is master.
    private void maybeScheduleJob() {
        if (this.isMaster) {
            if (scheduler.get() == null) {
                scheduler.set(new SchedulerEngine(settings, clock));
                scheduler.get().register(this);
            }
            scheduledJob = new SchedulerEngine.Job(XPackField.INDEX_LIFECYCLE, new TimeValueSchedule(pollInterval));
            scheduler.get().add(scheduledJob);
        }
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        IndexLifecycleMetadata lifecycleMetadata = event.state().metaData().custom(IndexLifecycleMetadata.TYPE);
        if (this.isMaster && lifecycleMetadata != null) {
            triggerPolicies(event.state(), true);
        }
    }

    @Override
    public void applyClusterState(ClusterChangedEvent event) {
        if (event.localNodeMaster()) { // only act if we are master, otherwise
            // keep idle until elected
            if (event.state().metaData().custom(IndexLifecycleMetadata.TYPE) != null) {
                policyRegistry.update(event.state());
            }
        }
    }

    private void cancelJob() {
        if (scheduler.get() != null) {
            scheduler.get().remove(XPackField.INDEX_LIFECYCLE);
            scheduledJob = null;
        }
    }

    @Override
    public void triggered(SchedulerEngine.Event event) {
        if (event.getJobName().equals(XPackField.INDEX_LIFECYCLE)) {
            logger.trace("job triggered: " + event.getJobName() + ", " + event.getScheduledTime() + ", " + event.getTriggeredTime());
            triggerPolicies(clusterService.state(), false);
        }
    }

    /**
     * executes the policy execution on the appropriate indices by running cluster-state tasks per index.
     *
     * If stopping ILM was requested, and it is safe to stop, this will also be done here
     * when possible after no policies are executed.
     *
     * @param clusterState the current cluster state
     * @param fromClusterStateChange whether things are triggered from the cluster-state-listener or the scheduler
     */
    void triggerPolicies(ClusterState clusterState, boolean fromClusterStateChange) {
        IndexLifecycleMetadata currentMetadata = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);

        if (currentMetadata == null) {
            return;
        }

        OperationMode currentMode = currentMetadata.getOperationMode();

        if (OperationMode.STOPPED.equals(currentMode)) {
            return;
        }

        boolean safeToStop = true; // true until proven false by a run policy

        // loop through all indices in cluster state and filter for ones that are
        // managed by the Index Lifecycle Service they have a index.lifecycle.name setting
        // associated to a policy
        for (ObjectCursor<IndexMetaData> cursor : clusterState.metaData().indices().values()) {
            IndexMetaData idxMeta = cursor.value;
            String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings());
            if (Strings.isNullOrEmpty(policyName) == false) {
                StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta));
                if (OperationMode.STOPPING == currentMode && stepKey != null
                    && IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) {
                    logger.info("skipping policy [" + policyName + "] for index [" + idxMeta.getIndex().getName()
                        + "]. stopping Index Lifecycle execution");
                    continue;
                }
                // Periodic ticks run the full periodic step; state-change triggers
                // only run the cheaper after-state-change path.
                if (fromClusterStateChange) {
                    lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta);
                } else {
                    lifecycleRunner.runPeriodicStep(policyName, idxMeta);
                }
                safeToStop = false; // proven false!
            }
        }
        if (safeToStop && OperationMode.STOPPING == currentMode) {
            submitOperationModeUpdate(OperationMode.STOPPED);
        }
    }

    @Override
    public void close() {
        SchedulerEngine engine = scheduler.get();
        if (engine != null) {
            engine.stop();
        }
    }

    /** Submits a cluster-state task that transitions ILM's global operation mode. */
    public void submitOperationModeUpdate(OperationMode mode) {
        clusterService.submitStateUpdateTask("ilm_operation_mode_update",
            new OperationModeUpdateTask(mode));
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.indexlifecycle;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.xpack.core.ClientHelper;
import org.elasticsearch.xpack.core.indexlifecycle.Step;

import java.util.Map;

/**
 * This class wraps a client and calls the client using the headers provided in
 * constructor. The intent is to abstract away the fact that there are headers
 * so {@link Step}s etc. can call this client as if it was a normal client.
+ * + * Note: This client will not close the wrapped {@link Client} instance since + * the intent is that the wrapped client is shared between multiple instances of + * this class. + */ +public class LifecyclePolicySecurityClient extends AbstractClient { + + private Client client; + private Map headers; + private String origin; + + public LifecyclePolicySecurityClient(Client client, String origin, Map headers) { + super(client.settings(), client.threadPool()); + this.client = client; + this.origin = origin; + this.headers = headers; + } + + @Override + public void close() { + // Doesn't close the wrapped client since this client object is shared + // among multiple instances + } + + @Override + protected < + Request extends ActionRequest, + Response extends ActionResponse, + RequestBuilder extends ActionRequestBuilder> + void doExecute(final Action action, final Request request, final ActionListener listener) { + ClientHelper.executeWithHeadersAsync(headers, origin, client, action, request, listener); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java new file mode 100644 index 0000000000000..5af1a05309e7e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.io.IOException; +import java.util.function.LongSupplier; + +public class MoveToErrorStepUpdateTask extends ClusterStateUpdateTask { + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private LongSupplier nowSupplier; + private Exception cause; + + public MoveToErrorStepUpdateTask(Index index, String policy, Step.StepKey currentStepKey, Exception cause, LongSupplier nowSupplier) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.cause = cause; + this.nowSupplier = nowSupplier; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + Exception getCause() { + return cause; + } + + @Override + public ClusterState execute(ClusterState currentState) throws IOException { + IndexMetaData idxMeta = currentState.getMetaData().index(index); + if (idxMeta == null) { + // Index must have been since deleted, ignore it + return currentState; + } + Settings indexSettings = idxMeta.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(idxMeta); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + return 
IndexLifecycleRunner.moveClusterStateToErrorStep(index, currentState, currentStepKey, cause, nowSupplier); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + + "] failed trying to move from step [" + currentStepKey + "] to the ERROR step.", e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java new file mode 100644 index 0000000000000..750fd1af5da42 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.util.function.Consumer; +import java.util.function.LongSupplier; + +public class MoveToNextStepUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(MoveToNextStepUpdateTask.class); + + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private final Step.StepKey nextStepKey; + private final LongSupplier nowSupplier; + private final Consumer stateChangeConsumer; + + public MoveToNextStepUpdateTask(Index index, String policy, Step.StepKey currentStepKey, Step.StepKey nextStepKey, + LongSupplier nowSupplier, Consumer stateChangeConsumer) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.nextStepKey = nextStepKey; + this.nowSupplier = nowSupplier; + this.stateChangeConsumer = stateChangeConsumer; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + Step.StepKey getNextStepKey() { + return nextStepKey; + } + + @Override + public ClusterState execute(ClusterState currentState) { + IndexMetaData indexMetaData = currentState.getMetaData().index(index); + if (indexMetaData == null) { + // Index must have been since deleted, ignore it + return currentState; + } + 
Settings indexSettings = indexMetaData.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(currentState.getMetaData().index(index)); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + logger.trace("moving [{}] to next step ({})", index.getName(), nextStepKey); + return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState.equals(newState) == false) { + stateChangeConsumer.accept(newState); + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + + currentStepKey + "] to step [" + nextStepKey + "].", e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java new file mode 100644 index 0000000000000..0cf24300831cd --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
 */
package org.elasticsearch.xpack.indexlifecycle;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;

/**
 * Cluster-state update task that transitions ILM's global {@link OperationMode}
 * (e.g. RUNNING -> STOPPING -> STOPPED). Invalid transitions are rejected by
 * keeping the existing mode; failures are logged rather than rethrown since a
 * missed mode change is not fatal.
 */
public class OperationModeUpdateTask extends ClusterStateUpdateTask {
    private static final Logger logger = LogManager.getLogger(OperationModeUpdateTask.class);
    private final OperationMode mode;

    public OperationModeUpdateTask(OperationMode mode) {
        this.mode = mode;
    }

    OperationMode getOperationMode() {
        return mode;
    }

    @Override
    public ClusterState execute(ClusterState currentState) {
        IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE);
        // Fast path: metadata exists but the requested transition is invalid -> no-op.
        if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) {
            return currentState;
        } else if (currentMetadata == null) {
            // No ILM metadata yet; validate the transition against the default (EMPTY) mode below.
            currentMetadata = IndexLifecycleMetadata.EMPTY;
        }

        // Re-check here covers the metadata-was-null path: the change may still be
        // invalid relative to EMPTY's mode, in which case the current mode is kept.
        final OperationMode newMode;
        if (currentMetadata.getOperationMode().isValidChange(mode)) {
            newMode = mode;
        } else {
            newMode = currentMetadata.getOperationMode();
        }

        // Rebuild the cluster state with the (possibly unchanged) mode, preserving
        // the existing policy metadata.
        ClusterState.Builder builder = new ClusterState.Builder(currentState);
        MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
        metadataBuilder.putCustom(IndexLifecycleMetadata.TYPE,
            new IndexLifecycleMetadata(currentMetadata.getPolicyMetadatas(), newMode));
        builder.metaData(metadataBuilder.build());
        return builder.build();
    }

    @Override
    public void onFailure(String source, Exception e) {
        // Non-fatal: log and move on; a later mode-update attempt can retry.
        logger.error("unable to update lifecycle metadata with new mode [" + mode + "]", e);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.indexlifecycle;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.core.ClientHelper;
import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
import org.elasticsearch.xpack.core.indexlifecycle.Phase;
import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo;
import org.elasticsearch.xpack.core.indexlifecycle.Step;
import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;

/**
 * Caches the {@link Step}s for each lifecycle policy known to the cluster.
 * The caches are kept in sync with cluster state via {@link #update(ClusterState)}
 * and consulted by the lifecycle runner to resolve the concrete step object an
 * index should execute.
 */
public class PolicyStepsRegistry {
    private static final Logger logger = LogManager.getLogger(PolicyStepsRegistry.class);

    private final Client client;
    // keeps track of existing policies in the cluster state
    private final SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap;
    // keeps track of what the first step in a policy is, the key is policy name
    private final Map<String, Step> firstStepMap;
    // keeps track of a mapping from policy/step-name to respective Step, the key is policy name
    private final Map<String, Map<Step.StepKey, Step>> stepMap;
    private final NamedXContentRegistry xContentRegistry;

    public PolicyStepsRegistry(NamedXContentRegistry xContentRegistry, Client client) {
        this(new TreeMap<>(), new HashMap<>(), new HashMap<>(), xContentRegistry, client);
    }

    // pkg-private constructor allowing tests to seed the caches directly
    PolicyStepsRegistry(SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap,
                        Map<String, Step> firstStepMap, Map<String, Map<Step.StepKey, Step>> stepMap,
                        NamedXContentRegistry xContentRegistry, Client client) {
        this.lifecyclePolicyMap = lifecyclePolicyMap;
        this.firstStepMap = firstStepMap;
        this.stepMap = stepMap;
        this.xContentRegistry = xContentRegistry;
        this.client = client;
    }

    SortedMap<String, LifecyclePolicyMetadata> getLifecyclePolicyMap() {
        return lifecyclePolicyMap;
    }

    Map<String, Step> getFirstStepMap() {
        return firstStepMap;
    }

    Map<String, Map<Step.StepKey, Step>> getStepMap() {
        return stepMap;
    }

    /**
     * Synchronizes the cached policy/step maps with the policies in the given
     * cluster state: removed policies are evicted, and any changed or new policy
     * is fully rebuilt (treated as an upsert).
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public void update(ClusterState clusterState) {
        final IndexLifecycleMetadata meta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);

        assert meta != null : "IndexLifecycleMetadata cannot be null when updating the policy steps registry";

        Diff<Map<String, LifecyclePolicyMetadata>> diff = DiffableUtils.diff(lifecyclePolicyMap, meta.getPolicyMetadatas(),
            DiffableUtils.getStringKeySerializer(),
            // Use a non-diffable value serializer. Otherwise actions in the same
            // action and phase that are changed show up as diffs instead of upserts.
            // We want to treat any change in the policy as an upsert so the map is
            // correctly rebuilt
            new DiffableUtils.NonDiffableValueSerializer<String, LifecyclePolicyMetadata>() {
                @Override
                public void write(LifecyclePolicyMetadata value, StreamOutput out) {
                    // This is never called
                    throw new UnsupportedOperationException("should never be called");
                }

                @Override
                public LifecyclePolicyMetadata read(StreamInput in, String key) {
                    // This is never called
                    throw new UnsupportedOperationException("should never be called");
                }
            });
        DiffableUtils.MapDiff<String, LifecyclePolicyMetadata, Map<String, LifecyclePolicyMetadata>> mapDiff =
            (DiffableUtils.MapDiff) diff;

        // Evict everything we cached for deleted policies.
        for (String deletedPolicyName : mapDiff.getDeletes()) {
            lifecyclePolicyMap.remove(deletedPolicyName);
            firstStepMap.remove(deletedPolicyName);
            stepMap.remove(deletedPolicyName);
        }

        if (mapDiff.getUpserts().isEmpty() == false) {
            for (LifecyclePolicyMetadata policyMetadata : mapDiff.getUpserts().values()) {
                // Steps are materialized against a client that carries the headers of
                // the user who stored the policy, so they run with that user's privileges.
                LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN,
                    policyMetadata.getHeaders());
                lifecyclePolicyMap.put(policyMetadata.getName(), policyMetadata);
                List<Step> policyAsSteps = policyMetadata.getPolicy().toSteps(policyClient);
                if (policyAsSteps.isEmpty() == false) {
                    firstStepMap.put(policyMetadata.getName(), policyAsSteps.get(0));
                    final Map<Step.StepKey, Step> stepMapForPolicy = new HashMap<>();
                    for (Step step : policyAsSteps) {
                        assert ErrorStep.NAME.equals(step.getKey().getName()) == false : "unexpected error step in policy";
                        stepMapForPolicy.put(step.getKey(), step);
                    }
                    logger.trace("updating cached steps for [{}] policy, new steps: {}",
                        policyMetadata.getName(), stepMapForPolicy.keySet());
                    stepMap.put(policyMetadata.getName(), stepMapForPolicy);
                }
            }
        }
    }

    /**
     * Materializes the steps for the phase an index is currently in, based on the
     * phase definition JSON frozen into the index's lifecycle execution state
     * (so in-flight indices keep executing the phase as it was when they entered it).
     *
     * @throws IllegalStateException if the policy no longer exists
     * @throws IOException           if the phase definition cannot be parsed
     */
    private List<Step> parseStepsFromPhase(String policy, String currentPhase, String phaseDef) throws IOException {
        final PhaseExecutionInfo phaseExecutionInfo;
        LifecyclePolicyMetadata policyMetadata = lifecyclePolicyMap.get(policy);
        if (policyMetadata == null) {
            throw new IllegalStateException("unable to parse steps for policy [" + policy + "] as it doesn't exist");
        }
        LifecyclePolicy currentPolicy = policyMetadata.getPolicy();
        final LifecyclePolicy policyToExecute;
        if (InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phaseDef)
            || TerminalPolicyStep.COMPLETED_PHASE.equals(phaseDef)) {
            // It is ok to re-use potentially modified policy here since we are in an initialization or completed phase
            policyToExecute = currentPolicy;
        } else {
            // if the current phase definition describes an internal step/phase, do not parse
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(xContentRegistry,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, phaseDef)) {
                phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase);
            }
            // Overlay the frozen phase definition on top of the current policy's phases.
            Map<String, Phase> phaseMap = new HashMap<>(currentPolicy.getPhases());
            if (phaseExecutionInfo.getPhase() != null) {
                phaseMap.put(currentPhase, phaseExecutionInfo.getPhase());
            }
            policyToExecute = new LifecyclePolicy(currentPolicy.getType(), currentPolicy.getName(), phaseMap);
        }
        LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client,
            ClientHelper.INDEX_LIFECYCLE_ORIGIN, lifecyclePolicyMap.get(policy).getHeaders());
        final List<Step> steps = policyToExecute.toSteps(policyClient);
        // Build a list of steps that correspond with the phase the index is currently in
        final List<Step> phaseSteps;
        if (steps == null) {
            phaseSteps = new ArrayList<>();
        } else {
            phaseSteps = steps.stream()
                .filter(e -> e.getKey().getPhase().equals(currentPhase))
                .collect(Collectors.toList());
        }
        logger.trace("parsed steps for policy [{}] in phase [{}], definition: [{}], steps: [{}]",
            policy, currentPhase, phaseDef, phaseSteps);
        return phaseSteps;
    }

    /**
     * Resolves the concrete {@link Step} the given index should execute for
     * {@code stepKey}, or {@code null} if no such step exists in the index's
     * current phase.
     *
     * @throws IllegalArgumentException if the index has no lifecycle policy setting
     * @throws ElasticsearchException   if the cached phase steps cannot be loaded
     * @throws XContentParseException   if the frozen phase definition is malformed
     */
    @Nullable
    public Step getStep(final IndexMetaData indexMetaData, final Step.StepKey stepKey) {
        // ERROR steps are synthetic and never stored in a policy; build one on demand.
        if (ErrorStep.NAME.equals(stepKey.getName())) {
            return new ErrorStep(new Step.StepKey(stepKey.getPhase(), stepKey.getAction(), ErrorStep.NAME));
        }

        final String phase = stepKey.getPhase();
        final String policyName = indexMetaData.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
        final Index index = indexMetaData.getIndex();

        if (policyName == null) {
            throw new IllegalArgumentException("failed to retrieve step " + stepKey + " as index [" + index.getName() + "] has no policy");
        }

        // parse phase steps from the phase definition in the index settings
        final String phaseJson = Optional.ofNullable(LifecycleExecutionState.fromIndexMetadata(indexMetaData).getPhaseDefinition())
            .orElse(InitializePolicyContextStep.INITIALIZATION_PHASE);

        final List<Step> phaseSteps;
        try {
            phaseSteps = parseStepsFromPhase(policyName, phase, phaseJson);
        } catch (IOException e) {
            throw new ElasticsearchException("failed to load cached steps for " + stepKey, e);
        } catch (XContentParseException parseErr) {
            throw new XContentParseException(parseErr.getLocation(),
                "failed to load steps for " + stepKey + " from [" + phaseJson + "]", parseErr);
        }

        assert phaseSteps.stream().allMatch(step -> step.getKey().getPhase().equals(phase)) :
            "expected phase steps loaded from phase definition for [" + index.getName() + "] to be in phase [" + phase
                + "] but they were not, steps: " + phaseSteps;

        // Return the step that matches the given stepKey or else null if we couldn't find it
        return phaseSteps.stream().filter(step -> step.getKey().equals(stepKey)).findFirst().orElse(null);
    }

    /**
     * Given a policy and stepkey, return true if a step exists, false otherwise
     */
    public boolean stepExists(final String policy, final Step.StepKey stepKey) {
        Map<Step.StepKey, Step> steps = stepMap.get(policy);
        if (steps == null) {
            return false;
        } else {
            return steps.containsKey(stepKey);
        }
    }

    /** @return whether the given policy name is known to this registry. */
    public boolean policyExists(final String policy) {
        return lifecyclePolicyMap.containsKey(policy);
    }

    /** @return the first step of the given policy, or {@code null} if not cached. */
    public Step getFirstStep(String policy) {
        return firstStepMap.get(policy);
    }

    /**
     * Returns the minimum index age configured for the given phase of the given
     * policy, or {@link TimeValue#ZERO} for built-in phases or phases the policy
     * no longer declares.
     *
     * @throws IllegalArgumentException if the policy is not in the registry
     */
    public TimeValue getIndexAgeForPhase(final String policy, final String phase) {
        // These built in phases should never wait
        if (InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phase) || TerminalPolicyStep.COMPLETED_PHASE.equals(phase)) {
            return TimeValue.ZERO;
        }
        final LifecyclePolicyMetadata meta = lifecyclePolicyMap.get(policy);
        if (meta == null) {
            throw new IllegalArgumentException("no policy found with name \"" + policy + "\"");
        } else {
            final Phase retrievedPhase = meta.getPolicy().getPhases().get(phase);
            if (retrievedPhase == null) {
                // We don't have that phase registered, proceed right through it
                return TimeValue.ZERO;
            } else {
                return retrievedPhase.getMinimumAge();
            }
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.io.IOException; +import java.util.Objects; + +public class SetStepInfoUpdateTask extends ClusterStateUpdateTask { + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private ToXContentObject stepInfo; + + public SetStepInfoUpdateTask(Index index, String policy, Step.StepKey currentStepKey, ToXContentObject stepInfo) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.stepInfo = stepInfo; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + ToXContentObject getStepInfo() { + return stepInfo; + } + + @Override + public ClusterState execute(ClusterState currentState) throws IOException { + IndexMetaData idxMeta = currentState.getMetaData().index(index); + if (idxMeta == null) { + // Index must have been since deleted, ignore it + return currentState; + } + Settings indexSettings = idxMeta.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(idxMeta); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && Objects.equals(currentStepKey, IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + 
return IndexLifecycleRunner.addStepInfoToClusterState(index, currentState, stepInfo); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + + "] failed trying to set step info for step [" + currentStepKey + "].", e); + } + + public static class ExceptionWrapper implements ToXContentObject { + private final Throwable exception; + + public ExceptionWrapper(Throwable exception) { + this.exception = exception; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + ElasticsearchException.generateThrowableXContent(builder, params, exception); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java new file mode 100644 index 0000000000000..436f8637a0228 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Schedule; + +import java.util.Objects; + +public class TimeValueSchedule implements Schedule { + + private TimeValue interval; + + public TimeValueSchedule(TimeValue interval) { + if (interval.millis() <= 0) { + throw new IllegalArgumentException("interval must be greater than 0 milliseconds"); + } + this.interval = interval; + } + + public TimeValue getInterval() { + return interval; + } + + @Override + public long nextScheduledTimeAfter(long startTime, long time) { + assert time >= startTime; + if (startTime == time) { + time++; + } + long delta = time - startTime; + return startTime + (delta / interval.millis() + 1) * interval.millis(); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeValueSchedule other = (TimeValueSchedule) obj; + return Objects.equals(interval, other.interval); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java new file mode 100644 index 0000000000000..081e7d1565f79 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; + +import java.io.IOException; + +public class RestDeleteLifecycleAction extends BaseRestHandler { + + public RestDeleteLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_delete_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String lifecycleName = restRequest.param("name"); + DeleteLifecycleAction.Request deleteLifecycleRequest = new DeleteLifecycleAction.Request(lifecycleName); + deleteLifecycleRequest.timeout(restRequest.paramAsTime("timeout", deleteLifecycleRequest.timeout())); + deleteLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(DeleteLifecycleAction.INSTANCE, deleteLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java new file mode 100644 index 0000000000000..96be5f0fc0337 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; + +import java.io.IOException; + +public class RestExplainLifecycleAction extends BaseRestHandler { + + public RestExplainLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/{index}/_ilm/explain", this); + } + + @Override + public String getName() { + return "ilm_explain_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); + ExplainLifecycleRequest explainLifecycleRequest = new ExplainLifecycleRequest(); + explainLifecycleRequest.indices(indexes); + explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); + String masterNodeTimeout = restRequest.param("master_timeout"); + if (masterNodeTimeout != null) { + explainLifecycleRequest.masterNodeTimeout(masterNodeTimeout); + } + + return channel -> client.execute(ExplainLifecycleAction.INSTANCE, explainLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java new file mode 100644 index 0000000000000..b518fe2f08698 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; + +import java.io.IOException; + +public class RestGetLifecycleAction extends BaseRestHandler { + + public RestGetLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/policy", this); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_get_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] lifecycleNames = Strings.splitStringByCommaToArray(restRequest.param("name")); + GetLifecycleAction.Request getLifecycleRequest = new GetLifecycleAction.Request(lifecycleNames); + getLifecycleRequest.timeout(restRequest.paramAsTime("timeout", getLifecycleRequest.timeout())); + 
getLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", getLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(GetLifecycleAction.INSTANCE, getLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java new file mode 100644 index 0000000000000..be2d16ee0be76 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; + +public class RestGetStatusAction extends BaseRestHandler { + + public RestGetStatusAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/status", this); + } + + @Override + public String getName() { + return "ilm_get_operation_mode_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + GetStatusAction.Request request = new GetStatusAction.Request(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java new file mode 100644 index 0000000000000..41228041679e7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; + +import java.io.IOException; + +public class RestMoveToStepAction extends BaseRestHandler { + + public RestMoveToStepAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST,"/_ilm/move/{name}", this); + } + + @Override + public String getName() { + return "ilm_move_to_step_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String index = restRequest.param("name"); + XContentParser parser = restRequest.contentParser(); + MoveToStepAction.Request 
request = MoveToStepAction.Request.parseRequest(index, parser); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(MoveToStepAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java new file mode 100644 index 0000000000000..586c3c683264e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; + +import java.io.IOException; + +public class RestPutLifecycleAction extends BaseRestHandler { + + public RestPutLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_put_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String lifecycleName = restRequest.param("name"); + XContentParser parser = restRequest.contentParser(); + PutLifecycleAction.Request putLifecycleRequest = PutLifecycleAction.Request.parseRequest(lifecycleName, parser); + putLifecycleRequest.timeout(restRequest.paramAsTime("timeout", putLifecycleRequest.timeout())); + putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(PutLifecycleAction.INSTANCE, putLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..afa328ab8d8fe --- /dev/null +++ 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; + +import java.io.IOException; + +public class RestRemoveIndexLifecyclePolicyAction extends BaseRestHandler { + + public RestRemoveIndexLifecyclePolicyAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, "/{index}/_ilm", this); + } + + @Override + public String getName() { + return "ilm_remove_policy_for_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); + RemoveIndexLifecyclePolicyAction.Request changePolicyRequest = new RemoveIndexLifecyclePolicyAction.Request(indexes); + changePolicyRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", changePolicyRequest.masterNodeTimeout())); + changePolicyRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, changePolicyRequest.indicesOptions())); + + return channel -> + client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, 
changePolicyRequest, new RestToXContentListener<>(channel)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java new file mode 100644 index 0000000000000..9e12c3cc34ed7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; + +public class RestRetryAction extends BaseRestHandler { + + public RestRetryAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ilm/retry", this); + } + + @Override + public String getName() { + return "ilm_retry_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); + RetryAction.Request request = new RetryAction.Request(indices); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.indices(indices); + request.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); + return channel -> client.execute(RetryAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java new file mode 100644 index 0000000000000..84f46a30406fd --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; + +public class RestStartILMAction extends BaseRestHandler { + + public RestStartILMAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/_ilm/start", this); + } + + @Override + public String getName() { + return "ilm_start_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + StartILMRequest request = new StartILMRequest(); + 
request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(StartILMAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java new file mode 100644 index 0000000000000..2f8d3c5e43037 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; + +public class RestStopAction extends BaseRestHandler { + + public RestStopAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/_ilm/stop", this); + } + + @Override + public String getName() { + return "ilm_stop_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + StopILMRequest request = new StopILMRequest(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(StopILMAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java new file mode 100644 index 0000000000000..a8b718bbb7be7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import 
org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Response; + +import java.util.Iterator; +import java.util.SortedMap; +import java.util.TreeMap; + +public class TransportDeleteLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportDeleteLifecycleAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeleteLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + clusterService.submitStateUpdateTask("delete-lifecycle-" + request.getPolicyName(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + Iterator indicesIt = currentState.metaData().indices().valuesIt(); + while(indicesIt.hasNext()) { + IndexMetaData idxMeta = indicesIt.next(); + String indexPolicy = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); + if (request.getPolicyName().equals(indexPolicy)) { + throw new IllegalArgumentException("Cannot delete policy [" + request.getPolicyName() + + "]. 
It is being used by at least one index [" + idxMeta.getIndex().getName() + "]"); + } + + } + ClusterState.Builder newState = ClusterState.builder(currentState); + IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata == null + || currentMetadata.getPolicyMetadatas().containsKey(request.getPolicyName()) == false) { + throw new ResourceNotFoundException("Lifecycle policy not found: {}", request.getPolicyName()); + } + SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); + newPolicies.remove(request.getPolicyName()); + IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentMetadata.getOperationMode()); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); + return newState.build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java new file mode 100644 index 0000000000000..addef8a285df9 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class TransportExplainLifecycleAction + extends TransportClusterInfoAction { + + private final NamedXContentRegistry xContentRegistry; + + @Inject + public 
TransportExplainLifecycleAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NamedXContentRegistry xContentRegistry) { + super(settings, ExplainLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, ExplainLifecycleRequest::new); + this.xContentRegistry = xContentRegistry; + } + + @Override + protected ExplainLifecycleResponse newResponse() { + return new ExplainLifecycleResponse(); + } + + @Override + protected String executor() { + // very lightweight operation, no need to fork + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterBlockException checkBlock(ExplainLifecycleRequest request, ClusterState state) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, + indexNameExpressionResolver.concreteIndexNames(state, request)); + } + + @Override + protected void doMasterOperation(ExplainLifecycleRequest request, String[] concreteIndices, ClusterState state, + ActionListener listener) { + Map indexReponses = new HashMap<>(); + for (String index : concreteIndices) { + IndexMetaData idxMetadata = state.metaData().index(index); + Settings idxSettings = idxMetadata.getSettings(); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMetadata); + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxSettings); + String currentPhase = lifecycleState.getPhase(); + String stepInfo = lifecycleState.getStepInfo(); + BytesArray stepInfoBytes = null; + if (stepInfo != null) { + stepInfoBytes = new BytesArray(stepInfo); + } + // parse existing phase steps from the phase definition in the index settings + String phaseDef = lifecycleState.getPhaseDefinition(); + PhaseExecutionInfo phaseExecutionInfo = null; + if (Strings.isNullOrEmpty(phaseDef) == false) { + try (XContentParser 
parser = JsonXContent.jsonXContent.createParser(xContentRegistry, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, phaseDef)) { + phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase); + } catch (IOException e) { + listener.onFailure(new ElasticsearchParseException( + "failed to parse phase definition for index [" + index + "]", e)); + return; + } + } + final IndexLifecycleExplainResponse indexResponse; + if (Strings.hasLength(policyName)) { + indexResponse = IndexLifecycleExplainResponse.newManagedIndexResponse(index, policyName, + lifecycleState.getLifecycleDate(), + lifecycleState.getPhase(), + lifecycleState.getAction(), + lifecycleState.getStep(), + lifecycleState.getFailedStep(), + lifecycleState.getPhaseTime(), + lifecycleState.getActionTime(), + lifecycleState.getStepTime(), + stepInfoBytes, + phaseExecutionInfo); + } else { + indexResponse = IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); + } + indexReponses.put(indexResponse.getIndex(), indexResponse); + } + listener.onResponse(new ExplainLifecycleResponse(indexReponses)); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java new file mode 100644 index 0000000000000..c7660dc68d6ab --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.LifecyclePolicyResponseItem; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Response; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class TransportGetLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportGetLifecycleAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + 
@Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexLifecycleMetadata metadata = clusterService.state().metaData().custom(IndexLifecycleMetadata.TYPE); + if (metadata == null) { + if (request.getPolicyNames().length == 0) { + listener.onResponse(new Response(Collections.emptyList())); + } else { + listener.onFailure(new ResourceNotFoundException("Lifecycle policy not found: {}", + Arrays.toString(request.getPolicyNames()))); + } + } else { + List requestedPolicies; + // if no policies explicitly provided, behave as if `*` was specified + if (request.getPolicyNames().length == 0) { + requestedPolicies = new ArrayList<>(metadata.getPolicyMetadatas().size()); + for (LifecyclePolicyMetadata policyMetadata : metadata.getPolicyMetadatas().values()) { + requestedPolicies.add(new LifecyclePolicyResponseItem(policyMetadata.getPolicy(), + policyMetadata.getVersion(), policyMetadata.getModifiedDateString())); + } + } else { + requestedPolicies = new ArrayList<>(request.getPolicyNames().length); + for (String name : request.getPolicyNames()) { + LifecyclePolicyMetadata policyMetadata = metadata.getPolicyMetadatas().get(name); + if (policyMetadata == null) { + listener.onFailure(new ResourceNotFoundException("Lifecycle policy not found: {}", name)); + return; + } + requestedPolicies.add(new LifecyclePolicyResponseItem(policyMetadata.getPolicy(), + policyMetadata.getVersion(), policyMetadata.getModifiedDateString())); + } + } + listener.onResponse(new Response(requestedPolicies)); + } + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java new file mode 100644 index 0000000000000..b5f777540aa04 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction.Response; + +public class TransportGetStatusAction extends TransportMasterNodeAction { + + @Inject + public TransportGetStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver 
indexNameExpressionResolver) { + super(settings, GetStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexLifecycleMetadata metadata = state.metaData().custom(IndexLifecycleMetadata.TYPE); + final Response response; + if (metadata == null) { + // no need to actually install metadata just yet, but safe to say it is not stopped + response = new Response(OperationMode.RUNNING); + } else { + response = new Response(metadata.getOperationMode()); + } + listener.onResponse(response); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java new file mode 100644 index 0000000000000..a2744569e213c --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; + +public class TransportMoveToStepAction extends TransportMasterNodeAction { + IndexLifecycleService indexLifecycleService; + @Inject + public TransportMoveToStepAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexLifecycleService indexLifecycleService) { + super(settings, MoveToStepAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + this.indexLifecycleService = indexLifecycleService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected 
void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexMetaData indexMetaData = state.metaData().index(request.getIndex()); + if (indexMetaData == null) { + listener.onFailure(new IllegalArgumentException("index [" + request.getIndex() + "] does not exist")); + return; + } + clusterService.submitStateUpdateTask("index[" + request.getIndex() + "]-move-to-step", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return indexLifecycleService.moveClusterStateToStep(currentState, request.getIndex(), request.getCurrentStepKey(), + request.getNextStepKey()); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + IndexMetaData newIndexMetaData = newState.metaData().index(indexMetaData.getIndex()); + if (newIndexMetaData == null) { + // The index has somehow been deleted - there shouldn't be any opportunity for this to happen, but just in case. 
+ logger.debug("index [" + indexMetaData.getIndex() + "] has been deleted after moving to step [" + + request.getNextStepKey() + "], skipping async action check"); + return; + } + indexLifecycleService.maybeRunAsyncAction(newState, newIndexMetaData, request.getNextStepKey()); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java new file mode 100644 index 0000000000000..2a56f179f39a5 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Response; + +import java.time.Instant; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * This class is responsible for bootstrapping {@link IndexLifecycleMetadata} into the cluster-state, as well + * as adding the desired new policy to be inserted. 
+ */ +public class TransportPutLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportPutLifecycleAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, PutLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + // headers from the thread context stored by the AuthenticationService to be shared between the + // REST layer and the Transport layer here must be accessed within this thread and not in the + // cluster state thread in the ClusterStateUpdateTask below since that thread does not share the + // same context, and therefore does not have access to the appropriate security headers. 
+ Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + clusterService.submitStateUpdateTask("put-lifecycle-" + request.getPolicy().getName(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + ClusterState.Builder newState = ClusterState.builder(currentState); + IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata == null) { // first time using index-lifecycle feature, bootstrap metadata + currentMetadata = IndexLifecycleMetadata.EMPTY; + } + LifecyclePolicyMetadata existingPolicyMetadata = currentMetadata.getPolicyMetadatas() + .get(request.getPolicy().getName()); + long nextVersion = (existingPolicyMetadata == null) ? 
1L : existingPolicyMetadata.getVersion() + 1L; + SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); + LifecyclePolicyMetadata lifecyclePolicyMetadata = new LifecyclePolicyMetadata(request.getPolicy(), filteredHeaders, + nextVersion, Instant.now().toEpochMilli()); + newPolicies.put(lifecyclePolicyMetadata.getName(), lifecyclePolicyMetadata); + IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, OperationMode.RUNNING); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); + return newState.build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..149921a29a3d1 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunner; + +import java.util.ArrayList; +import java.util.List; + +public class TransportRemoveIndexLifecyclePolicyAction extends TransportMasterNodeAction { + + @Inject + public TransportRemoveIndexLifecyclePolicyAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, RemoveIndexLifecyclePolicyAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected 
ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + final Index[] indices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices()); + clusterService.submitStateUpdateTask("remove-lifecycle-for-index", + new AckedClusterStateUpdateTask(request, listener) { + + private final List failedIndexes = new ArrayList<>(); + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return IndexLifecycleRunner.removePolicyForIndexes(indices, currentState, failedIndexes); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(failedIndexes); + } + }); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java new file mode 100644 index 0000000000000..4bbefafee0866 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; + +public class TransportRetryAction extends TransportMasterNodeAction { + + IndexLifecycleService indexLifecycleService; + + @Inject + public TransportRetryAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexLifecycleService indexLifecycleService) { + super(settings, RetryAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + this.indexLifecycleService = indexLifecycleService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { 
+ clusterService.submitStateUpdateTask("ilm-re-run", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return indexLifecycleService.moveClusterStateToFailedStep(currentState, request.indices()); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java new file mode 100644 index 0000000000000..f4abe9fd4c79f --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.indexlifecycle.OperationModeUpdateTask; + +public class TransportStartILMAction extends TransportMasterNodeAction { + + @Inject + public TransportStartILMAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, StartILMAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + StartILMRequest::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(StartILMRequest request, ClusterState state, ActionListener listener) { + 
clusterService.submitStateUpdateTask("ilm_operation_mode_update", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return (new OperationModeUpdateTask(OperationMode.RUNNING)).execute(currentState); + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(StartILMRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java new file mode 100644 index 0000000000000..55e036e5f11e0 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; +import org.elasticsearch.xpack.indexlifecycle.OperationModeUpdateTask; + +public class TransportStopILMAction extends TransportMasterNodeAction { + + @Inject + public TransportStopILMAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, StopILMAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + StopILMRequest::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(StopILMRequest request, ClusterState state, ActionListener listener) { + 
clusterService.submitStateUpdateTask("ilm_operation_mode_update", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return (new OperationModeUpdateTask(OperationMode.STOPPING)).execute(currentState); + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(StopILMRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java new file mode 100644 index 0000000000000..3776363cf175e --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.indexlifecycle.LockableLifecycleType; + +import java.util.Map; + +/** + * This class is here for constructing instances of {@link LifecyclePolicy} that differs from + * the main {@link TimeseriesLifecycleType} one. 
Since the more generic constructor is package-private so + * that users are not exposed to {@link LifecycleType}, it is still useful to construct different ones for + * testing purposes + */ +public class LifecyclePolicyTestsUtils { + + public static LifecyclePolicy newTestLifecyclePolicy(String policyName, Map phases) { + return new LifecyclePolicy(TestLifecycleType.INSTANCE, policyName, phases); + } + + public static LifecyclePolicy newLockableLifecyclePolicy(String policyName, Map phases) { + return new LifecyclePolicy(LockableLifecycleType.INSTANCE, policyName, phases); + } + + public static LifecyclePolicy randomTimeseriesLifecyclePolicy(String policyName) { + return LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(policyName); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java new file mode 100644 index 0000000000000..21f2b0e70939f --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java @@ -0,0 +1,307 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.Index; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.MockStep; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateActionStep; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateWaitStep; +import org.junit.Before; +import org.mockito.Mockito; + +import 
java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class ExecuteStepsUpdateTaskTests extends ESTestCase { + + private static final StepKey firstStepKey = new StepKey("first_phase", "action_1", "step_1"); + private static final StepKey secondStepKey = new StepKey("first_phase", "action_1", "step_2"); + private static final StepKey thirdStepKey = new StepKey("first_phase", "action_1", "step_3"); + private static final StepKey invalidStepKey = new StepKey("invalid", "invalid", "invalid"); + private ClusterState clusterState; + private PolicyStepsRegistry policyStepsRegistry; + private String mixedPolicyName; + private String allClusterPolicyName; + private String invalidPolicyName; + private Index index; + private IndexMetaData indexMetaData; + private MockClusterStateActionStep firstStep; + private MockClusterStateWaitStep secondStep; + private MockClusterStateWaitStep allClusterSecondStep; + private MockStep thirdStep; + private Client client; + private IndexLifecycleMetadata lifecycleMetadata; + private String indexName; + + @Before + public void prepareState() throws IOException { + client = Mockito.mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + firstStep = new MockClusterStateActionStep(firstStepKey, secondStepKey); + secondStep = new MockClusterStateWaitStep(secondStepKey, thirdStepKey); + secondStep.setWillComplete(true); + allClusterSecondStep = new MockClusterStateWaitStep(secondStepKey, TerminalPolicyStep.KEY); + allClusterSecondStep.setWillComplete(true); + thirdStep = new 
MockStep(thirdStepKey, null); + mixedPolicyName = randomAlphaOfLengthBetween(5, 10); + allClusterPolicyName = randomAlphaOfLengthBetween(1, 4); + invalidPolicyName = randomAlphaOfLength(11); + Phase mixedPhase = new Phase("first_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(firstStep, secondStep, thirdStep)))); + Phase allClusterPhase = new Phase("first_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(firstStep, allClusterSecondStep)))); + Phase invalidPhase = new Phase("invalid_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(new MockClusterStateActionStep(firstStepKey, invalidStepKey))))); + LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, + Collections.singletonMap(mixedPhase.getName(), mixedPhase)); + LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(allClusterPolicyName, + Collections.singletonMap(allClusterPhase.getName(), allClusterPhase)); + LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(invalidPolicyName, + Collections.singletonMap(invalidPhase.getName(), invalidPhase)); + Map policyMap = new HashMap<>(); + policyMap.put(mixedPolicyName, new LifecyclePolicyMetadata(mixedPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyMap.put(allClusterPolicyName, new LifecyclePolicyMetadata(allClusterPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyMap.put(invalidPolicyName, new LifecyclePolicyMetadata(invalidPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyStepsRegistry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client); + + indexName = randomAlphaOfLength(5); + lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + indexMetaData = setupIndexPolicy(mixedPolicyName); + } + + private IndexMetaData 
setupIndexPolicy(String policyName) { + // Reset the index to use the "allClusterPolicyName" + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase("new"); + lifecycleState.setAction("init"); + lifecycleState.setStep("init"); + IndexMetaData indexMetadata = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + policyStepsRegistry.update(clusterState); + return indexMetadata; + } + + public void testNeverExecuteNonClusterStateStep() throws IOException { + setStateToKey(thirdStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, thirdStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + assertThat(task.execute(clusterState), sameInstance(clusterState)); + } + + public void testSuccessThenFailureUnsetNextKey() throws IOException { + secondStep.setWillComplete(false); + setStateToKey(firstStepKey); + Step startStep = 
policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(task.getNextStepKey(), nullValue()); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteUntilFirstNonClusterStateStep() throws IOException { + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(thirdStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteInvalidStartStep() throws IOException { + // Unset the index's phase/action/step to simulate starting from scratch + 
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index))); + lifecycleState.setPhase(null); + lifecycleState.setAction(null); + lifecycleState.setStep(null); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + + policyStepsRegistry.update(clusterState); + + Step invalidStep = new MockClusterStateActionStep(firstStepKey, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(invalidPolicyName, index, + invalidStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + assertSame(newState, clusterState); + } + + public void testExecuteIncompleteWaitStepNoInfo() throws IOException { + secondStep.setWillComplete(false); + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteIncompleteWaitStepWithInfo() throws 
IOException { + secondStep.setWillComplete(false); + RandomStepInfo stepInfo = new RandomStepInfo(() -> randomAlphaOfLength(10)); + secondStep.expectedInfo(stepInfo); + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo(stepInfo.toString())); + } + + public void testOnFailure() throws IOException { + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + mixedPolicyName + "] for index [" + index.getName() + "] failed on step [" + startStep.getKey() + "].", + exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + public void testClusterActionStepThrowsException() throws IOException { + RuntimeException thrownException = new RuntimeException("error"); + 
firstStep.setException(thrownException); + setStateToKey(firstStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(new StepKey(firstStepKey.getPhase(), firstStepKey.getAction(), ErrorStep.NAME))); + assertThat(firstStep.getExecuteCount(), equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(0L)); + assertThat(task.getNextStepKey(), equalTo(secondStep.getKey())); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + } + + public void testClusterWaitStepThrowsException() throws IOException { + RuntimeException thrownException = new RuntimeException("error"); + secondStep.setException(thrownException); + setStateToKey(firstStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(new StepKey(firstStepKey.getPhase(), firstStepKey.getAction(), ErrorStep.NAME))); + assertThat(firstStep.getExecuteCount(), 
equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(task.getNextStepKey(), equalTo(thirdStepKey)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + } + + private void setStateToKey(StepKey stepKey) throws IOException { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setStep(stepKey.getName()); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + policyStepsRegistry.update(clusterState); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java new file mode 100644 index 0000000000000..d83a41b4e60bc --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexLifecycleFeatureSetTests extends ESTestCase { + + private XPackLicenseState licenseState; + private ClusterService clusterService; + + @Before + public void init() throws Exception { + licenseState = mock(XPackLicenseState.class); + clusterService = mock(ClusterService.class); + } + + public void testAvailable() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, 
clusterService); + + when(licenseState.isIndexLifecycleAllowed()).thenReturn(false); + assertThat(featureSet.available(), equalTo(false)); + + when(licenseState.isIndexLifecycleAllowed()).thenReturn(true); + assertThat(featureSet.available(), equalTo(true)); + + featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, null, clusterService); + assertThat(featureSet.available(), equalTo(false)); + } + + public void testEnabled() { + Settings.Builder settings = Settings.builder().put("xpack.ilm.enabled", false); + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(false)); + + settings = Settings.builder().put("xpack.ilm.enabled", true); + featureSet = new IndexLifecycleFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(true)); + } + + public void testName() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + assertThat(featureSet.name(), equalTo("ilm")); + } + + public void testNativeCodeInfo() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + assertNull(featureSet.nativeCodeInfo()); + } + + public void testUsageStats() throws Exception { + Map indexPolicies = new HashMap<>(); + List policies = new ArrayList<>(); + String policy1Name = randomAlphaOfLength(10); + String policy2Name = randomAlphaOfLength(10); + String policy3Name = randomAlphaOfLength(10); + indexPolicies.put("index_1", policy1Name); + indexPolicies.put("index_2", policy1Name); + indexPolicies.put("index_3", policy1Name); + indexPolicies.put("index_4", policy1Name); + indexPolicies.put("index_5", policy3Name); + LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Collections.emptyMap()); + policies.add(policy1); + PolicyStats policy1Stats = new PolicyStats(Collections.emptyMap(), 4); + + Map phases1 = 
new HashMap<>(); + LifecyclePolicy policy2 = new LifecyclePolicy(policy2Name, phases1); + policies.add(policy2); + PolicyStats policy2Stats = new PolicyStats(Collections.emptyMap(), 0); + + LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Collections.emptyMap()); + policies.add(policy3); + PolicyStats policy3Stats = new PolicyStats(Collections.emptyMap(), 1); + + ClusterState clusterState = buildClusterState(policies, indexPolicies); + Mockito.when(clusterService.state()).thenReturn(clusterState); + + PlainActionFuture future = new PlainActionFuture<>(); + IndexLifecycleFeatureSet ilmFeatureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + ilmFeatureSet.usage(future); + IndexLifecycleFeatureSetUsage ilmUsage = (IndexLifecycleFeatureSetUsage) future.get(); + assertThat(ilmUsage.enabled(), equalTo(ilmFeatureSet.enabled())); + assertThat(ilmUsage.available(), equalTo(ilmFeatureSet.available())); + + List policyStatsList = ilmUsage.getPolicyStats(); + assertThat(policyStatsList.size(), equalTo(policies.size())); + assertTrue(policyStatsList.contains(policy1Stats)); + assertTrue(policyStatsList.contains(policy2Stats)); + assertTrue(policyStatsList.contains(policy3Stats)); + + } + + private ClusterState buildClusterState(List lifecyclePolicies, Map indexPolicies) { + Map lifecyclePolicyMetadatasMap = lifecyclePolicies.stream() + .map(p -> new LifecyclePolicyMetadata(p, Collections.emptyMap(), 1, 0L)) + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING); + + MetaData.Builder metadata = MetaData.builder().putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata); + indexPolicies.forEach((indexName, policyName) -> { + Settings indexSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetaData.Builder indexMetadata = IndexMetaData.builder(indexName).settings(indexSettings); + metadata.put(indexMetadata); + }); + + return ClusterState.builder(new ClusterName("my_cluster")).metaData(metadata).build(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java new file mode 100644 index 0000000000000..7bd974d31c176 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class IndexLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected IndexLifecycleFeatureSetUsage createTestInstance() { + boolean available = randomBoolean(); + boolean enabled = randomBoolean(); + List policyStats = new ArrayList<>(); + int size = randomIntBetween(0, 10); + for (int i = 0; i < size; i++) { + policyStats.add(PolicyStatsTests.randomPolicyStats()); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected IndexLifecycleFeatureSetUsage mutateInstance(IndexLifecycleFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + List policyStats = instance.getPolicyStats(); + switch (between(0, 2)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + case 2: + policyStats = new ArrayList<>(policyStats); + policyStats.add(PolicyStatsTests.randomPolicyStats()); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java new file mode 100644 index 
0000000000000..a041232d8a7e7 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -0,0 +1,492 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import 
org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.junit.Before; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.Requests.clusterHealthRequest; +import static org.elasticsearch.client.Requests.createIndexRequest; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newLockableLifecyclePolicy; +import static org.hamcrest.CoreMatchers.not; +import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.core.CombinableMatcher.both; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNull.nullValue; + +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class IndexLifecycleInitialisationTests extends ESIntegTestCase { + private Settings settings; + private LifecyclePolicy lifecyclePolicy; + private Phase mockPhase; + private static final ObservableAction OBSERVABLE_ACTION; + static { + List steps = new ArrayList<>(); + Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME); + steps.add(new ObservableClusterStateWaitStep(key, TerminalPolicyStep.KEY)); + OBSERVABLE_ACTION = new ObservableAction(steps, true); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + settings.put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s"); + return settings.build(); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Settings transportClientSettings() { + Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + 
settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + return settings.build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Before + public void init() { + settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0).put(LifecycleSettings.LIFECYCLE_NAME, "test").build(); + List steps = new ArrayList<>(); + Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME); + steps.add(new ObservableClusterStateWaitStep(key, TerminalPolicyStep.KEY)); + Map actions = Collections.singletonMap(ObservableAction.NAME, OBSERVABLE_ACTION); + mockPhase = new Phase("mock", TimeValue.timeValueSeconds(0), actions); + Map phases = Collections.singletonMap("mock", mockPhase); + lifecyclePolicy = newLockableLifecyclePolicy("test", phases); + } + + public void testSingleNodeCluster() throws Exception { + settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build(); + // start master node + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + final String node1 = getLocalNodeId(server_1); + + // test get-lifecycle behavior when IndexLifecycleMetaData is null + GetLifecycleAction.Response getUninitializedLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + 
assertThat(getUninitializedLifecycleResponse.getPolicies().size(), equalTo(0)); + ExecutionException exception = expectThrows(ExecutionException.class,() -> client() + .execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request("non-existent-policy")).get()); + assertThat(exception.getMessage(), containsString("Lifecycle policy not found: [non-existent-policy]")); + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + long lowerBoundModifiedDate = Instant.now().toEpochMilli(); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + long upperBoundModifiedDate = Instant.now().toEpochMilli(); + + // assert version and modified_date + GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1)); + GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0); + assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy)); + assertThat(responseItem.getVersion(), equalTo(1L)); + long actualModifiedDate = Instant.parse(responseItem.getModifiedDate()).toEpochMilli(); + assertThat(actualModifiedDate, + is(both(greaterThanOrEqualTo(lowerBoundModifiedDate)).and(lessThanOrEqualTo(upperBoundModifiedDate)))); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), 
equalTo(1)); + assertBusy(() -> { + assertEquals(true, client().admin().indices().prepareExists("test").get().isExists()); + }); + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + assertNotNull(indexLifecycleService.getScheduledJob()); + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testExplainExecution() throws Exception { + // start node + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + + GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1)); + GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0); + assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy)); + assertThat(responseItem.getVersion(), equalTo(1L)); + long actualModifiedDate = Instant.parse(responseItem.getModifiedDate()).toEpochMilli(); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + { + PhaseExecutionInfo expectedExecutionInfo = new 
PhaseExecutionInfo(lifecyclePolicy.getName(), mockPhase, 1L, actualModifiedDate); + assertBusy(() -> { + ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); + ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); + assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); + IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + assertThat(indexResponse.getStep(), equalTo("observable_cluster_state_action")); + assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); + }); + } + + // complete the step + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap("index.lifecycle.test.complete", true)).get(); + + { + PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), null, 1L, actualModifiedDate); + assertBusy(() -> { + ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); + ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); + assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); + IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + assertThat(indexResponse.getPhase(), equalTo(TerminalPolicyStep.COMPLETED_PHASE)); + assertThat(indexResponse.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); + }); + } + } + + public void testMasterDedicatedDataDedicated() throws Exception { + settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build(); + // start master node + logger.info("Starting master-only server1"); + final String server_1 = internalCluster().startMasterOnlyNode(); + // start data node + logger.info("Starting data-only server2"); + final String server_2 = 
internalCluster().startDataOnlyNode(); + final String node2 = getLocalNodeId(server_2); + + // check that the scheduler was started on the appropriate node + { + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + assertNotNull(indexLifecycleService.getScheduledJob()); + } + { + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_2); + assertNull(indexLifecycleService.getScheduler()); + assertNull(indexLifecycleService.getScheduledJob()); + } + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node2); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); + + assertBusy(() -> { + assertEquals(true, client().admin().indices().prepareExists("test").get().isExists()); + }); + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testMasterFailover() throws Exception { + // start one server + logger.info("Starting server1"); + final String server_1 = 
internalCluster().startNode(); + final String node1 = getLocalNodeId(server_1); + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); + + logger.info("Starting server2"); + // start another server + internalCluster().startNode(); + + // first wait for 2 nodes in the cluster + logger.info("Waiting for replicas to be assigned"); + ClusterHealthResponse clusterHealth = client().admin().cluster() + .health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); + + // check step in progress in lifecycle + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(ObservableClusterStateWaitStep.NAME)); + }); + + if (randomBoolean()) { + // this checks that the phase execution is picked up from the phase definition settings + logger.info("updating lifecycle [test_lifecycle] to be empty"); + PutLifecycleAction.Request 
updateLifecycleRequest = new PutLifecycleAction.Request + (newLockableLifecyclePolicy(lifecyclePolicy.getName(), Collections.emptyMap())); + PutLifecycleAction.Response updateLifecycleResponse = client() + .execute(PutLifecycleAction.INSTANCE, updateLifecycleRequest).get(); + assertAcked(updateLifecycleResponse); + } + + + logger.info("Closing server1"); + // kill the first server + internalCluster().stopCurrentMasterNode(); + + // check that index lifecycle picked back up where it left off + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(ObservableClusterStateWaitStep.NAME)); + }); + + logger.info("new master is operational"); + // complete the step + AcknowledgedResponse response = client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap("index.lifecycle.test.complete", true)).get(); + + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testPollIntervalUpdate() throws Exception { + TimeValue pollInterval = TimeValue.timeValueSeconds(randomLongBetween(1, 5)); + final String server_1 = internalCluster().startMasterOnlyNode( + Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, pollInterval.getStringRep()).build()); + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertBusy(() -> { + assertNotNull(indexLifecycleService.getScheduler()); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + }); + { + TimeValueSchedule schedule = (TimeValueSchedule) 
indexLifecycleService.getScheduledJob().getSchedule(); + assertThat(schedule.getInterval(), equalTo(pollInterval)); + } + + // update the poll interval + TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000)); + Settings newIntervalSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, + newPollInterval.getStringRep()).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(newIntervalSettings)); + { + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + assertThat(schedule.getInterval(), equalTo(newPollInterval)); + } + } + + private String getLocalNodeId(String name) { + TransportService transportService = internalCluster().getInstance(TransportService.class, name); + String nodeId = transportService.getLocalNode().getId(); + assertThat(nodeId, not(nullValue())); + return nodeId; + } + + public static class TestILMPlugin extends Plugin { + public TestILMPlugin() { + } + + public List> getSettings() { + final Setting COMPLETE_SETTING = Setting.boolSetting("index.lifecycle.test.complete", false, + Setting.Property.Dynamic, Setting.Property.IndexScope); + return Collections.singletonList(COMPLETE_SETTING); + } + + @Override + public List getNamedXContent() { + return Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> { + MockAction.parse(p); + return OBSERVABLE_ACTION; + }) + ); + } + + @Override + public List getNamedWriteables() { + return Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleType.class, LockableLifecycleType.TYPE, + (in) -> LockableLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ObservableAction.NAME, ObservableAction::readObservableAction), + new NamedWriteableRegistry.Entry(ObservableClusterStateWaitStep.class, ObservableClusterStateWaitStep.NAME, + ObservableClusterStateWaitStep::new)); + } + } + + 
public static class ObservableClusterStateWaitStep extends ClusterStateWaitStep implements NamedWriteable { + public static final String NAME = "observable_cluster_state_action"; + + public ObservableClusterStateWaitStep(StepKey current, StepKey next) { + super(current, next); + } + + public ObservableClusterStateWaitStep(StreamInput in) throws IOException { + this(new StepKey(in.readString(), in.readString(), in.readString()), readOptionalNextStepKey(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getKey().getPhase()); + out.writeString(getKey().getAction()); + out.writeString(getKey().getName()); + boolean hasNextStep = getNextStepKey() != null; + out.writeBoolean(hasNextStep); + if (hasNextStep) { + out.writeString(getNextStepKey().getPhase()); + out.writeString(getNextStepKey().getAction()); + out.writeString(getNextStepKey().getName()); + } + } + + private static StepKey readOptionalNextStepKey(StreamInput in) throws IOException { + if (in.readBoolean()) { + return new StepKey(in.readString(), in.readString(), in.readString()); + } + return null; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + boolean complete = clusterState.metaData().index("test").getSettings() + .getAsBoolean("index.lifecycle.test.complete", false); + return new Result(complete, null); + } + } + + public static class ObservableAction extends MockAction { + + ObservableAction(List steps, boolean safe) { + super(steps, safe); + } + + public static ObservableAction readObservableAction(StreamInput in) throws IOException { + List steps = in.readList(ObservableClusterStateWaitStep::new); + boolean safe = in.readBoolean(); + return new ObservableAction(steps, safe); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(getSteps().stream().map(s -> (ObservableClusterStateWaitStep) 
s).collect(Collectors.toList())); + out.writeBoolean(isSafeAction()); + } + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java new file mode 100644 index 0000000000000..fe24b555a7b27 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.test.AbstractDiffableSerializationTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata.IndexLifecycleMetadataDiff; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import 
org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.randomTimeseriesLifecyclePolicy; + +public class IndexLifecycleMetadataTests extends AbstractDiffableSerializationTestCase { + + @Override + protected IndexLifecycleMetadata createTestInstance() { + int numPolicies = randomIntBetween(1, 5); + Map policies = new HashMap<>(numPolicies); + for (int i = 0; i < numPolicies; i++) { + LifecyclePolicy policy = randomTimeseriesLifecyclePolicy(randomAlphaOfLength(4) + i); + policies.put(policy.getName(), new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } + return new IndexLifecycleMetadata(policies, randomFrom(OperationMode.values())); + } + + @Override + protected IndexLifecycleMetadata doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleMetadata.PARSER.apply(parser, null); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleMetadata::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new 
NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new) + )); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + @Override + protected MetaData.Custom mutateInstance(MetaData.Custom instance) { + IndexLifecycleMetadata metadata = 
(IndexLifecycleMetadata) instance; + Map policies = metadata.getPolicyMetadatas(); + policies = new TreeMap<>(policies); + OperationMode mode = metadata.getOperationMode(); + if (randomBoolean()) { + String policyName = randomAlphaOfLength(10); + policies.put(policyName, new LifecyclePolicyMetadata(randomTimeseriesLifecyclePolicy(policyName), Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } else { + mode = randomValueOtherThan(metadata.getOperationMode(), () -> randomFrom(OperationMode.values())); + } + return new IndexLifecycleMetadata(policies, mode); + } + + @Override + protected Custom makeTestChanges(Custom testInstance) { + return mutateInstance(testInstance); + } + + @Override + protected Reader> diffReader() { + return IndexLifecycleMetadataDiff::new; + } + + public void testMinimumSupportedVersion() { + assertEquals(Version.V_6_5_0, createTestInstance().getMinimalSupportedVersion()); + } + + public void testcontext() { + assertEquals(MetaData.ALL_CONTEXTS, createTestInstance().context()); + } + + public static IndexLifecycleMetadata createTestInstance(int numPolicies, OperationMode mode) { + SortedMap policies = new TreeMap<>(); + for (int i = 0; i < numPolicies; i++) { + int numberPhases = randomInt(5); + Map phases = new HashMap<>(numberPhases); + for (int j = 0; j < numberPhases; j++) { + TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + Map actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + } + String phaseName = randomAlphaOfLength(10); + phases.put(phaseName, new Phase(phaseName, after, actions)); + } + String policyName = randomAlphaOfLength(10); + policies.put(policyName, new LifecyclePolicyMetadata(newTestLifecyclePolicy(policyName, phases), Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } + return new IndexLifecycleMetadata(policies, 
mode); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java new file mode 100644 index 0000000000000..7ecc97b82cf3e --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -0,0 +1,1471 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import 
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.AbstractStepTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.MockStep; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.mockito.ArgumentMatcher; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class IndexLifecycleRunnerTests extends ESTestCase { + private static final NamedXContentRegistry REGISTRY; + + static { + try (IndexLifecycle indexLifecycle = new IndexLifecycle(Settings.EMPTY)) { + List entries = new ArrayList<>(indexLifecycle.getNamedXContent()); + REGISTRY = new NamedXContentRegistry(entries); + } + } + + /** A real policy steps registry where getStep can be overridden so that JSON doesn't have to be parsed */ + private class MockPolicyStepsRegistry extends PolicyStepsRegistry { + private BiFunction fn = null; + + MockPolicyStepsRegistry(SortedMap lifecyclePolicyMap, Map firstStepMap, + Map> stepMap, NamedXContentRegistry xContentRegistry, Client client) { + super(lifecyclePolicyMap, firstStepMap, stepMap, xContentRegistry, client); + } + + public void setResolver(BiFunction fn) { + this.fn = fn; + } + + @Override + public Step getStep(IndexMetaData indexMetaData, StepKey stepKey) { + if (fn == null) { + logger.info("--> retrieving step {}", stepKey); + return super.getStep(indexMetaData, stepKey); + } else { + logger.info("--> returning mock step"); + return fn.apply(indexMetaData, stepKey); + } + } + } + + private MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step) { + return createOneStepPolicyStepRegistry(policyName, step, "test"); + } + + private MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step, String indexName) { + LifecyclePolicy policy = new LifecyclePolicy(policyName, new HashMap<>()); + SortedMap 
lifecyclePolicyMap = new TreeMap<>(); + lifecyclePolicyMap.put(policyName, new LifecyclePolicyMetadata(policy, new HashMap<>(), 1, 1)); + Map firstStepMap = new HashMap<>(); + firstStepMap.put(policyName, step); + Map> stepMap = new HashMap<>(); + Map policySteps = new HashMap<>(); + policySteps.put(step.getKey(), step); + stepMap.put(policyName, policySteps); + Map> indexSteps = new HashMap<>(); + List steps = new ArrayList<>(); + steps.add(step); + Index index = new Index(indexName, indexName + "uuid"); + indexSteps.put(index, steps); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + return new MockPolicyStepsRegistry(lifecyclePolicyMap, firstStepMap, stepMap, REGISTRY, client); + } + + public void testRunPolicyTerminalPolicyStep() { + String policyName = "async_action_policy"; + TerminalPolicyStep step = TerminalPolicyStep.INSTANCE; + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunPolicyErrorStep() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + LifecycleExecutionState.Builder 
newState = LifecycleExecutionState.builder(); + newState.setPhase(stepKey.getPhase()); + newState.setAction(stepKey.getAction()); + newState.setStep(ErrorStep.NAME); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .putCustom(ILM_CUSTOM_METADATA_KEY, newState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunStateChangePolicyWithNoNextStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ThreadPool threadPool = new TestThreadPool("name"); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before 
= clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + latch.await(5, TimeUnit.SECONDS); + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunStateChangePolicyWithNextStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey); + MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if (nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), 
OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + latch.await(5, TimeUnit.SECONDS); + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunAsyncActionDoesNotRun() { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ThreadPool threadPool = new TestThreadPool("name"); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + 
.nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + // State changes should not run AsyncAction steps + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(0L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "async_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey); + MockAsyncActionStep nextStep = new MockAsyncActionStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if (nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); 
+ ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + logger.info("--> state: {}", state); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + // Wait for the cluster state action step + latch.await(5, TimeUnit.SECONDS); + + CountDownLatch asyncLatch = new CountDownLatch(1); + nextStep.setLatch(asyncLatch); + + // Wait for the async action step + asyncLatch.await(5, TimeUnit.SECONDS); + ClusterState after = clusterService.state(); + + assertNotEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + assertThat(nextStep.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunPeriodicStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, nextStepKey); + MockAsyncWaitStep nextStep = new MockAsyncWaitStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if 
(nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + logger.info("--> state: {}", state); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPeriodicStep(policyName, indexMetaData); + latch.await(5, TimeUnit.SECONDS); + + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + assertThat(nextStep.getExecuteCount(), equalTo(0L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunPolicyClusterStateActionStep() { + 
String policyName = "cluster_state_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-execute-cluster-state-steps"), + Mockito.argThat(new ExecuteStepsUpdateTaskMatcher(indexMetaData.getIndex(), policyName, step))); + Mockito.verifyNoMoreInteractions(clusterService); + } + + public void testRunPolicyClusterStateWaitStep() { + String policyName = "cluster_state_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null); + step.setWillComplete(true); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-execute-cluster-state-steps"), + Mockito.argThat(new 
ExecuteStepsUpdateTaskMatcher(indexMetaData.getIndex(), policyName, step))); + Mockito.verifyNoMoreInteractions(clusterService); + } + + public void testRunPolicyAsyncActionStepClusterStateChangeIgnored() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + Exception expectedException = new RuntimeException(); + step.setException(expectedException); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + assertEquals(0, step.getExecuteCount()); + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunPolicyAsyncWaitStepClusterStateChangeIgnored() { + String policyName = "async_wait_policy"; + StepKey stepKey = new StepKey("phase", "action", "async_wait_step"); + MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, null); + Exception expectedException = new RuntimeException(); + step.setException(expectedException); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + assertEquals(0, step.getExecuteCount()); + 
Mockito.verifyZeroInteractions(clusterService); +    } + +    public void testRunPolicyThatDoesntExist() { +        String policyName = "does_not_exist"; +        ClusterService clusterService = mock(ClusterService.class); +        IndexLifecycleRunner runner = new IndexLifecycleRunner(new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, null), +            clusterService, () -> 0L); +        IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) +            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); +        // verify that no exception is thrown +        runner.runPolicyAfterStateChange(policyName, indexMetaData); +        Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-set-step-info"), +            Mockito.argThat(new SetStepInfoUpdateTaskMatcher(indexMetaData.getIndex(), policyName, null, +                (builder, params) -> { +                    builder.startObject(); +                    builder.field("reason", "policy [does_not_exist] does not exist"); +                    builder.field("type", "illegal_argument_exception"); +                    builder.endObject(); +                    return builder; +                }))); +        Mockito.verifyNoMoreInteractions(clusterService); +    } + +    public void testGetCurrentStepKey() { +        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); +        StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState.build()); +        assertNull(stepKey); + +        String phase = randomAlphaOfLength(20); +        String action = randomAlphaOfLength(20); +        String step = randomAlphaOfLength(20); +        LifecycleExecutionState.Builder lifecycleState2 = LifecycleExecutionState.builder(); +        lifecycleState2.setPhase(phase); +        lifecycleState2.setAction(action); +        lifecycleState2.setStep(step); +        stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState2.build()); +        assertNotNull(stepKey); +        assertEquals(phase, stepKey.getPhase()); +        assertEquals(action, stepKey.getAction()); +        assertEquals(step, stepKey.getName()); + +        phase = randomAlphaOfLength(20); +        action = 
randomAlphaOfLength(20); + step = null; + LifecycleExecutionState.Builder lifecycleState3 = LifecycleExecutionState.builder(); + lifecycleState3.setPhase(phase); + lifecycleState3.setAction(action); + lifecycleState3.setStep(step); + AssertionError error3 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState3.build())); + assertEquals("Current phase is not empty: " + phase, error3.getMessage()); + + phase = null; + action = randomAlphaOfLength(20); + step = null; + LifecycleExecutionState.Builder lifecycleState4 = LifecycleExecutionState.builder(); + lifecycleState4.setPhase(phase); + lifecycleState4.setAction(action); + lifecycleState4.setStep(step); + AssertionError error4 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState4.build())); + assertEquals("Current action is not empty: " + action, error4.getMessage()); + + phase = null; + action = randomAlphaOfLength(20); + step = randomAlphaOfLength(20); + LifecycleExecutionState.Builder lifecycleState5 = LifecycleExecutionState.builder(); + lifecycleState5.setPhase(phase); + lifecycleState5.setAction(action); + lifecycleState5.setStep(step); + AssertionError error5 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState5.build())); + assertEquals(null, error5.getMessage()); + + phase = null; + action = null; + step = randomAlphaOfLength(20); + LifecycleExecutionState.Builder lifecycleState6 = LifecycleExecutionState.builder(); + lifecycleState6.setPhase(phase); + lifecycleState6.setAction(action); + lifecycleState6.setStep(step); + AssertionError error6 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState6.build())); + assertEquals(null, error6.getMessage()); + } + + public void testGetCurrentStep() { + String policyName = "policy"; + StepKey firstStepKey = new StepKey("phase_1", "action_1", "step_1"); + StepKey secondStepKey = new 
StepKey("phase_1", "action_1", "step_2"); + Step firstStep = new MockStep(firstStepKey, secondStepKey); + Map firstStepMap = new HashMap<>(); + firstStepMap.put(policyName, firstStep); + Map> stepMap = new HashMap<>(); + Index index = new Index("test", "uuid"); + + Step.StepKey MOCK_STEP_KEY = new Step.StepKey("mock", "mock", "mock"); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + String phaseName = randomFrom(policy.getPhases().keySet()); + Phase phase = policy.getPhases().get(phaseName); + PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); + String phaseJson = Strings.toString(pei); + LifecycleAction action = randomFrom(phase.getActions().values()); + Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY)); + Settings indexSettings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName) + .build(); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhaseDefinition(phaseJson); + lifecycleState.setPhase(step.getKey().getPhase()); + lifecycleState.setAction(step.getKey().getAction()); + lifecycleState.setStep(step.getKey().getName()); + IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()) + .settings(indexSettings) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + SortedMap metas = new TreeMap<>(); + metas.put(policyName, policyMetadata); + PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, firstStepMap, stepMap, REGISTRY, client); + + // First step is 
retrieved because there are no settings for the index + Step stepFromNoSettings = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetaData, + LifecycleExecutionState.builder().build()); + assertEquals(firstStep, stepFromNoSettings); + + // The step that was written into the metadata is retrieved + Step currentStep = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetaData, lifecycleState.build()); + assertEquals(step.getKey(), currentStep.getKey()); + } + + public void testMoveClusterStateToNextStep() { + String indexName = "my_index"; + LifecyclePolicy policy = randomValueOtherThanMany(p -> p.getPhases().size() == 0, + () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")); + Phase nextPhase = policy.getPhases().values().stream().findFirst().get(); + List policyMetadatas = Collections.singletonList( + new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())); + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey(nextPhase.getName(), "next_action", "next_step"); + long now = randomNonNegativeLong(); + + // test going from null lifecycle settings to next step + ClusterState clusterState = buildClusterState(indexName, + Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy.getName()), LifecycleExecutionState.builder().build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + // 
test going from set currentStep settings to nextStep + Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + + clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testMoveClusterStateToNextStepSamePhase() { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey("current_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + + clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, 
clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testMoveClusterStateToNextStepSameAction() { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey("current_phase", "current_action", "next_step"); + long now = randomNonNegativeLong(); + + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testSuccessfulValidatedMoveClusterStateToNextStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + LifecyclePolicy policy = randomValueOtherThanMany(p -> p.getPhases().size() == 0, + () -> LifecyclePolicyTests.randomTestLifecyclePolicy(policyName)); + Phase nextPhase = 
policy.getPhases().values().stream().findFirst().get(); + List policyMetadatas = Collections.singletonList( + new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())); + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey(nextPhase.getName(), "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry); + assertClusterStateOnNextStep(clusterState, index, currentStepKey, nextStepKey, newClusterState, now); + } + + public void testValidatedMoveClusterStateToNextStepWithoutPolicy() { + String indexName = "my_index"; + String policyName = "policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, randomBoolean() 
? "" : null); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), equalTo("index [my_index] is not associated with an Index Lifecycle Policy")); + } + + public void testValidatedMoveClusterStateToNextStepInvalidCurrentStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey notCurrentStepKey = new StepKey("not_current_phase", "not_current_action", "not_current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, 
notCurrentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), equalTo("index [my_index] is not on current step " + + "[{\"phase\":\"not_current_phase\",\"action\":\"not_current_action\",\"name\":\"not_current_step\"}]")); + } + + public void testValidatedMoveClusterStateToNextStepInvalidNextStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(currentStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), + equalTo("step [{\"phase\":\"next_phase\",\"action\":\"next_action\",\"name\":\"next_step\"}] " + + "for index [my_index] with policy [my_policy] does not exist")); + } + + public void testMoveClusterStateToErrorStep() throws IOException { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + 
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); + assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, + "{\"type\":\"exception\",\"reason\":\"THIS IS AN EXPECTED CAUSE\"}"); + + cause = new IllegalArgumentException("non elasticsearch-exception"); + newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); + assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, + "{\"type\":\"illegal_argument_exception\",\"reason\":\"non elasticsearch-exception\"}"); + } + + public void testMoveClusterStateToFailedStep() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setAction(errorStepKey.getAction()); + 
lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setFailedStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + ClusterState nextClusterState = runner.moveClusterStateToFailedStep(clusterState, indices); + IndexLifecycleRunnerTests.assertClusterStateOnNextStep(clusterState, index, errorStepKey, failedStepKey, + nextClusterState, now); + } + + public void testMoveClusterStateToFailedStepIndexNotFound() { + String existingIndexName = "my_index"; + String invalidIndexName = "does_not_exist"; + ClusterState clusterState = buildClusterState(existingIndexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(null, null, () -> 0L); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, new String[] { invalidIndexName })); + assertThat(exception.getMessage(), equalTo("index [" + invalidIndexName + "] does not exist")); + } + + public void testMoveClusterStateToFailedStepInvalidPolicySetting() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, (String) null); + 
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setAction(errorStepKey.getAction()); + lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setFailedStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, indices)); + assertThat(exception.getMessage(), equalTo("index [" + indexName + "] is not associated with an Index Lifecycle Policy")); + } + + public void testMoveClusterStateToFailedNotOnError() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, (String) null); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(failedStepKey.getPhase()); + lifecycleState.setAction(failedStepKey.getAction()); + lifecycleState.setStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, indices)); + 
assertThat(exception.getMessage(), equalTo("cannot retry an action for an index [" + indices[0] + + "] that has not encountered an error when running a Lifecycle Policy")); + } + + public void testAddStepInfoToClusterState() throws IOException { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + RandomStepInfo stepInfo = new RandomStepInfo(() -> randomAlphaOfLength(10)); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.addStepInfoToClusterState(index, clusterState, stepInfo); + assertClusterStateStepInfo(clusterState, index, currentStep, newClusterState, stepInfo); + ClusterState runAgainClusterState = IndexLifecycleRunner.addStepInfoToClusterState(index, newClusterState, stepInfo); + assertSame(newClusterState, runAgainClusterState); + } + + private ClusterState buildClusterState(String indexName, Settings.Builder indexSettingsBuilder, + LifecycleExecutionState lifecycleState, + List lifecyclePolicyMetadatas) { + Settings indexSettings = indexSettingsBuilder.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetaData indexMetadata = IndexMetaData.builder(indexName) + .settings(indexSettings) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.asMap()) + .build(); + + Map lifecyclePolicyMetadatasMap = lifecyclePolicyMetadatas.stream() + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())); + 
IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING); + + MetaData metadata = MetaData.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata) + .build(); + return ClusterState.builder(new ClusterName("my_cluster")).metaData(metadata).build(); + } + + private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep, StepKey unsafeStep) { + Map phases = new HashMap<>(); + if (safeStep != null) { + assert MockAction.NAME.equals(safeStep.getAction()) : "The safe action needs to be MockAction.NAME"; + assert unsafeStep == null + || safeStep.getPhase().equals(unsafeStep.getPhase()) == false : "safe and unsafe actions must be in different phases"; + Map actions = new HashMap<>(); + List steps = Collections.singletonList(new MockStep(safeStep, null)); + MockAction safeAction = new MockAction(steps, true); + actions.put(safeAction.getWriteableName(), safeAction); + Phase phase = new Phase(safeStep.getPhase(), TimeValue.timeValueMillis(0), actions); + phases.put(phase.getName(), phase); + } + if (unsafeStep != null) { + assert MockAction.NAME.equals(unsafeStep.getAction()) : "The unsafe action needs to be MockAction.NAME"; + Map actions = new HashMap<>(); + List steps = Collections.singletonList(new MockStep(unsafeStep, null)); + MockAction unsafeAction = new MockAction(steps, false); + actions.put(unsafeAction.getWriteableName(), unsafeAction); + Phase phase = new Phase(unsafeStep.getPhase(), TimeValue.timeValueMillis(0), actions); + phases.put(phase.getName(), phase); + } + return newTestLifecyclePolicy(policyName, phases); + } + + public void testRemovePolicyForIndex() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + StepKey currentStep = new StepKey(randomAlphaOfLength(10), MockAction.NAME, randomAlphaOfLength(10)); + LifecyclePolicy oldPolicy = createPolicy(oldPolicyName, currentStep, null); + 
Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testRemovePolicyForIndexNoCurrentPolicy() { + String indexName = randomAlphaOfLength(10); + Settings.Builder indexSettingsBuilder = Settings.builder(); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testRemovePolicyForIndexIndexDoesntExist() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, Collections.emptyMap()); + StepKey currentStep = 
AbstractStepTestCase.randomStepKey(); + Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = new Index("doesnt_exist", "im_not_here"); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertEquals(1, failedIndexes.size()); + assertEquals("doesnt_exist", failedIndexes.get(0)); + assertSame(clusterState, newClusterState); + } + + public void testRemovePolicyForIndexIndexInUnsafe() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + StepKey currentStep = new StepKey(randomAlphaOfLength(10), MockAction.NAME, randomAlphaOfLength(10)); + LifecyclePolicy oldPolicy = createPolicy(oldPolicyName, null, currentStep); + Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = 
buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testIsReadyToTransition() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + step.setWillComplete(true); + SortedMap lifecyclePolicyMap = new TreeMap<>(Collections.singletonMap(policyName, + new LifecyclePolicyMetadata(createPolicy(policyName, null, step.getKey()), new HashMap<>(), + randomNonNegativeLong(), randomNonNegativeLong()))); + Map firstStepMap = Collections.singletonMap(policyName, step); + Map policySteps = Collections.singletonMap(step.getKey(), step); + Map> stepMap = Collections.singletonMap(policyName, policySteps); + PolicyStepsRegistry policyStepsRegistry = new PolicyStepsRegistry(lifecyclePolicyMap, firstStepMap, + stepMap, NamedXContentRegistry.EMPTY, null); + ClusterService clusterService = mock(ClusterService.class); + final AtomicLong now = new AtomicLong(5); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyStepsRegistry, clusterService, now::get); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)) + .build(); + // With no time, always transition + assertTrue("index should be able to transition with no creation date", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + 
lifecycleState.setIndexCreationDate(10L); + indexMetaData = IndexMetaData.builder(indexMetaData) + .settings(Settings.builder() + .put(indexMetaData.getSettings()) + .build()) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + // Index is not old enough to transition + assertFalse("index is not able to transition if it isn't old enough", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + + // Set to the fuuuuuttuuuuuuurre + now.set(Long.MAX_VALUE); + assertTrue("index should be able to transition past phase's age", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + } + + + public static void assertIndexNotManagedByILM(ClusterState clusterState, Index index) { + MetaData metadata = clusterState.metaData(); + assertNotNull(metadata); + IndexMetaData indexMetadata = metadata.getIndexSafe(index); + assertNotNull(indexMetadata); + Settings indexSettings = indexMetadata.getSettings(); + assertNotNull(indexSettings); + assertFalse(LifecycleSettings.LIFECYCLE_NAME_SETTING.exists(indexSettings)); + assertFalse(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.exists(indexSettings)); + } + + public static void assertClusterStateOnPolicy(ClusterState oldClusterState, Index index, String expectedPolicy, StepKey previousStep, + StepKey expectedStep, ClusterState newClusterState, long now) { + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, 
newLifecycleState); + assertEquals(expectedStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(expectedStep.getAction(), newLifecycleState.getAction()); + assertEquals(expectedStep.getName(), newLifecycleState.getStep()); + if (Objects.equals(previousStep.getPhase(), expectedStep.getPhase())) { + assertEquals(oldLifecycleState.getPhase(), newLifecycleState.getPhase()); + } else { + assertEquals(now, newLifecycleState.getPhaseTime().longValue()); + } + if (Objects.equals(previousStep.getAction(), expectedStep.getAction())) { + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + } else { + assertEquals(now, newLifecycleState.getActionTime().longValue()); + } + if (Objects.equals(previousStep.getName(), expectedStep.getName())) { + assertEquals(oldLifecycleState.getStepTime(), newLifecycleState.getStepTime()); + } else { + assertEquals(now, newLifecycleState.getStepTime().longValue()); + } + assertEquals(null, newLifecycleState.getFailedStep()); + assertEquals(null, newLifecycleState.getStepInfo()); + } + + public static void assertClusterStateOnNextStep(ClusterState oldClusterState, Index index, StepKey currentStep, StepKey nextStep, + ClusterState newClusterState, long now) { + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(nextStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(nextStep.getAction(), 
newLifecycleState.getAction()); + assertEquals(nextStep.getName(), newLifecycleState.getStep()); + if (currentStep.getPhase().equals(nextStep.getPhase())) { + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + } else { + assertEquals(now, newLifecycleState.getPhaseTime().longValue()); + } + if (currentStep.getAction().equals(nextStep.getAction())) { + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + } else { + assertEquals(now, newLifecycleState.getActionTime().longValue()); + } + assertEquals(now, newLifecycleState.getStepTime().longValue()); + assertEquals(null, newLifecycleState.getFailedStep()); + assertEquals(null, newLifecycleState.getStepInfo()); + } + + private void assertClusterStateOnErrorStep(ClusterState oldClusterState, Index index, StepKey currentStep, + ClusterState newClusterState, long now, String expectedCauseValue) throws IOException { + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(currentStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(currentStep.getAction(), newLifecycleState.getAction()); + assertEquals(ErrorStep.NAME, newLifecycleState.getStep()); + assertEquals(currentStep.getName(), newLifecycleState.getFailedStep()); + assertEquals(expectedCauseValue, newLifecycleState.getStepInfo()); + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + 
assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + assertEquals(now, newLifecycleState.getStepTime().longValue()); + } + + private void assertClusterStateStepInfo(ClusterState oldClusterState, Index index, StepKey currentStep, ClusterState newClusterState, + ToXContentObject stepInfo) throws IOException { + XContentBuilder stepInfoXContentBuilder = JsonXContent.contentBuilder(); + stepInfo.toXContent(stepInfoXContentBuilder, ToXContent.EMPTY_PARAMS); + String expectedstepInfoValue = BytesReference.bytes(stepInfoXContentBuilder).utf8ToString(); + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(currentStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(currentStep.getAction(), newLifecycleState.getAction()); + assertEquals(currentStep.getName(), newLifecycleState.getStep()); + assertEquals(expectedstepInfoValue, newLifecycleState.getStepInfo()); + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + assertEquals(newLifecycleState.getStepTime(), newLifecycleState.getStepTime()); + } + + private static class MockAsyncActionStep extends AsyncActionStep { + + private Exception exception; + private boolean willComplete; + private boolean indexSurvives = true; + private long executeCount = 0; + private CountDownLatch latch; + + 
MockAsyncActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey, null); + } + + void setException(Exception exception) { + this.exception = exception; + } + + @Override + public boolean indexSurvives() { + return indexSurvives; + } + + void setWillComplete(boolean willComplete) { + this.willComplete = willComplete; + } + + long getExecuteCount() { + return executeCount; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception == null) { + listener.onResponse(willComplete); + } else { + listener.onFailure(exception); + } + } + + } + + private static class MockAsyncWaitStep extends AsyncWaitStep { + + private Exception exception; + private boolean willComplete; + private long executeCount = 0; + private ToXContentObject expectedInfo = null; + private CountDownLatch latch; + + MockAsyncWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey, null); + } + + void setException(Exception exception) { + this.exception = exception; + } + + long getExecuteCount() { + return executeCount; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception == null) { + listener.onResponse(willComplete, expectedInfo); + } else { + listener.onFailure(exception); + } + } + + } + + static class MockClusterStateActionStep extends ClusterStateActionStep { + + private RuntimeException exception; + private long executeCount = 0; + private CountDownLatch latch; + + MockClusterStateActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public void setException(RuntimeException exception) { + this.exception = exception; + } + 
+ public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + public long getExecuteCount() { + return executeCount; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception != null) { + throw exception; + } + return clusterState; + } + } + + static class MockClusterStateWaitStep extends ClusterStateWaitStep { + + private RuntimeException exception; + private boolean willComplete; + private long executeCount = 0; + private ToXContentObject expectedInfo = null; + + MockClusterStateWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public void setException(RuntimeException exception) { + this.exception = exception; + } + + public void setWillComplete(boolean willComplete) { + this.willComplete = willComplete; + } + + void expectedInfo(ToXContentObject expectedInfo) { + this.expectedInfo = expectedInfo; + } + + public long getExecuteCount() { + return executeCount; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + executeCount++; + if (exception != null) { + throw exception; + } + return new Result(willComplete, expectedInfo); + } + + } + + private static class SetStepInfoUpdateTaskMatcher extends ArgumentMatcher { + + private Index index; + private String policy; + private StepKey currentStepKey; + private ToXContentObject stepInfo; + + SetStepInfoUpdateTaskMatcher(Index index, String policy, StepKey currentStepKey, ToXContentObject stepInfo) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.stepInfo = stepInfo; + } + + @Override + public boolean matches(Object argument) { + if (argument == null || argument instanceof SetStepInfoUpdateTask == false) { + return false; + } + SetStepInfoUpdateTask task = (SetStepInfoUpdateTask) argument; + return Objects.equals(index, task.getIndex()) && + Objects.equals(policy, 
task.getPolicy())&& + Objects.equals(currentStepKey, task.getCurrentStepKey()) && + Objects.equals(xContentToString(stepInfo), xContentToString(task.getStepInfo())); + } + + private String xContentToString(ToXContentObject xContent) { + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + stepInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + return BytesReference.bytes(builder).utf8ToString(); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + } + + private static class ExecuteStepsUpdateTaskMatcher extends ArgumentMatcher { + + private Index index; + private String policy; + private Step startStep; + + ExecuteStepsUpdateTaskMatcher(Index index, String policy, Step startStep) { + this.index = index; + this.policy = policy; + this.startStep = startStep; + } + + @Override + public boolean matches(Object argument) { + if (argument == null || argument instanceof ExecuteStepsUpdateTask == false) { + return false; + } + ExecuteStepsUpdateTask task = (ExecuteStepsUpdateTask) argument; + return Objects.equals(index, task.getIndex()) && + Objects.equals(policy, task.getPolicy()) && + Objects.equals(startStep, task.getStartStep()); + } + + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java new file mode 100644 index 0000000000000..13fe9c1c69002 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.Collections; 
// NOTE(review): this chunk is a collapsed git diff — the inline '+' tokens are diff line
// prefixes, and generic type parameters (e.g. SortedMap<String, LifecyclePolicyMetadata>,
// ImmutableOpenMap.Builder<String, IndexMetaData>) appear stripped by extraction — TODO
// restore against the upstream file. Unit tests for IndexLifecycleService: verifies that
// policies are skipped when the ILM operation mode is STOPPED, that an in-flight shrink
// action still runs while STOPPING, that a "safe" step halts and moves the mode to STOPPED,
// and that scheduler events for unrelated jobs are ignored. Code left byte-identical.
+import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; + +import static org.elasticsearch.node.Node.NODE_MASTER_SETTING; +import static org.elasticsearch.xpack.core.indexlifecycle.AbstractStepTestCase.randomStepKey; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexLifecycleServiceTests extends ESTestCase { + + private ClusterService clusterService; + private IndexLifecycleService indexLifecycleService; + private String nodeId; + private DiscoveryNode masterNode; + private IndicesAdminClient indicesClient; + private long now; + + @Before + public void prepareServices() { + nodeId = randomAlphaOfLength(10); + ExecutorService executorService = mock(ExecutorService.class); + clusterService = mock(ClusterService.class); + masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + now = randomNonNegativeLong(); + Clock clock = Clock.fixed(Instant.ofEpochMilli(now), ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds()))); + + doAnswer(invocationOnMock -> null).when(clusterService).addListener(any()); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[0]; + runnable.run(); + return null; + }).when(executorService).execute(any()); + Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build(); + when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, 
+ Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))); + + Client client = mock(Client.class); + AdminClient adminClient = mock(AdminClient.class); + indicesClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesClient); + when(client.settings()).thenReturn(Settings.EMPTY); + + indexLifecycleService = new IndexLifecycleService(Settings.EMPTY, client, clusterService, clock, () -> now, null); + Mockito.verify(clusterService).addListener(indexLifecycleService); + Mockito.verify(clusterService).addStateApplier(indexLifecycleService); + } + + @After + public void cleanup() { + indexLifecycleService.close(); + } + + + public void testStoppedModeSkip() { + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(randomStepKey(), randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPED)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, randomBoolean()); + assertThat(mockStep.getExecuteCount(), equalTo(0L)); + } + + public void testRequestedStopOnShrink() { + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, randomAlphaOfLength(5)); + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(mockShrinkStep.getPhase()); + lifecycleState.setAction(mockShrinkStep.getAction()); + lifecycleState.setStep(mockShrinkStep.getName()); + 
IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + SetOnce executedShrink = new SetOnce<>(); + doAnswer(invocationOnMock -> { + executedShrink.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, true); + assertTrue(executedShrink.get()); + } + + public void testRequestedStopOnSafeAction() { + String policyName = randomAlphaOfLengthBetween(1, 20); + Step.StepKey currentStepKey = randomStepKey(); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(currentStepKey, randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = 
new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + + SetOnce ranPolicy = new SetOnce<>(); + SetOnce moveToMaintenance = new SetOnce<>(); + doAnswer(invocationOnMock -> { + ranPolicy.set(true); + throw new AssertionError("invalid invocation"); + }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + + doAnswer(invocationOnMock -> { + OperationModeUpdateTask task = (OperationModeUpdateTask) invocationOnMock.getArguments()[1]; + assertThat(task.getOperationMode(), equalTo(OperationMode.STOPPED)); + 
moveToMaintenance.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any(OperationModeUpdateTask.class)); + + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, randomBoolean()); + assertNull(ranPolicy.get()); + assertTrue(moveToMaintenance.get()); + } + + public void testTriggeredDifferentJob() { + Mockito.reset(clusterService); + SchedulerEngine.Event schedulerEvent = new SchedulerEngine.Event("foo", randomLong(), randomLong()); + indexLifecycleService.triggered(schedulerEvent); + Mockito.verifyZeroInteractions(indicesClient, clusterService); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java new file mode 100644 index 0000000000000..4fec7ba80db8e --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
// NOTE(review): collapsed git-diff extraction ('+' are diff prefixes; generics such as
// ActionListener<...> and Map<String, String> stripped — TODO restore against upstream).
// Unit tests for LifecyclePolicySecurityClient: verifies that stored security headers
// (none / irrelevant / relevant) are applied to the thread context when the client
// dispatches a request on behalf of an ILM policy. Code left byte-identical.
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LifecyclePolicyClientTests extends ESTestCase { + + public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + + try 
(LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + Collections.emptyMap())) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); + } + + public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedException { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("foo", "foo"); + headers.put("bar", "bar"); + + try (LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + headers)) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); + } + + public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final 
CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertThat(threadContext.getHeaders().size(), equalTo(2)); + assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo")); + assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar")); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("es-security-runas-user", "foo"); + headers.put("_xpack_security_authentication", "bar"); + + try (LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + headers)) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java new file mode 100644 index 0000000000000..3e09133c435a8 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * This {@link LifecycleType} is used for encapsulating test policies + * used in integration tests where the underlying {@link LifecycleAction}s are + * able to communicate with the test + */ +public class LockableLifecycleType implements LifecycleType { + public static final String TYPE = "lockable"; + public static final LockableLifecycleType INSTANCE = new LockableLifecycleType(); + + @Override + public List getOrderedPhases(Map phases) { + return new ArrayList<>(phases.values()); + } + + @Override + public String getNextPhaseName(String currentPhaseName, Map phases) { + return null; + } + + @Override + public String getPreviousPhaseName(String currentPhaseName, Map phases) { + return null; + } + + @Override + public List getOrderedActions(Phase phase) { + return new ArrayList<>(phase.getActions().values()); + } + + @Override + public String getNextActionName(String currentActionName, Phase phase) { + return null; + } + + @Override + public void validate(Collection phases) { + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) { + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java new file mode 100644 index 0000000000000..dc3a6602f39ba --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java @@ -0,0 +1,149 @@ +/* + * 
// NOTE(review): collapsed git-diff extraction ('+' are diff line prefixes; this block begins
// mid license comment). Unit tests for MoveToErrorStepUpdateTask: verifies that executing the
// task moves the index's lifecycle state to the ERROR step (recording failed step, step time,
// and the cause as step-info XContent), that it is a no-op when the current step or policy no
// longer matches, and that onFailure wraps the cause. Code left byte-identical.
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class MoveToErrorStepUpdateTaskTests extends ESTestCase { + + 
String policy; + ClusterState clusterState; + Index index; + + @Before + public void setupClusterState() { + policy = randomAlphaOfLength(10); + LifecyclePolicy lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( + Collections.singletonMap(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())), + OperationMode.RUNNING); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + } + + public void testExecuteSuccessfullyMoved() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + setStateToKey(currentStepKey); + + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(actualKey, equalTo(new StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), ErrorStep.NAME))); + assertThat(lifecycleState.getFailedStep(), equalTo(currentStepKey.getName())); + assertThat(lifecycleState.getPhaseTime(), 
nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepTime(), equalTo(now)); + + XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder(); + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause); + causeXContentBuilder.endObject(); + String expectedCauseValue = BytesReference.bytes(causeXContentBuilder).utf8ToString(); + assertThat(lifecycleState.getStepInfo(), equalTo(expectedCauseValue)); + } + + public void testExecuteNoopDifferentStep() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + setStateToKey(notCurrentStepKey); + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void testExecuteNoopDifferentPolicy() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + setStateToKey(currentStepKey); + setStatePolicy("not-" + policy); + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void testOnFailure() { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + 
setStateToKey(currentStepKey); + + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + currentStepKey + + "] to the ERROR step.", exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + private void setStatePolicy(String policy) { + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .updateSettings(Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build(); + + } + private void setStateToKey(StepKey stepKey) { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setStep(stepKey.getName()); + + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java new file mode 100644 index 0000000000000..f166bba25c986 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. 
// NOTE(review): collapsed git-diff extraction ('+' are diff line prefixes; block begins mid
// license comment; generics such as List<Step> and SetOnce<Boolean> appear stripped — TODO
// restore against upstream). Unit tests for MoveToNextStepUpdateTask: verifies a successful
// step transition updates the step key and phase/action/step times and fires the state-change
// callback, that mismatched current step or policy yields the same ClusterState instance,
// that an unknown next step is still written, and that onFailure wraps the cause.
// Code left byte-identical.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; + +public class MoveToNextStepUpdateTaskTests extends ESTestCase { + + String policy; + ClusterState clusterState; + Index index; + LifecyclePolicy lifecyclePolicy; + + @Before + public void setupClusterState() { + policy = randomAlphaOfLength(10); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + 
.settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy); + IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( + Collections.singletonMap(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())), + OperationMode.RUNNING); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + } + + public void testExecuteSuccessfullyMoved() { + long now = randomNonNegativeLong(); + List steps = lifecyclePolicy.toSteps(null); + StepKey currentStepKey = steps.get(0).getKey(); + StepKey nextStepKey = steps.get(0).getNextStepKey(); + + setStateToKey(currentStepKey, now); + + AtomicBoolean changed = new AtomicBoolean(false); + MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey, + () -> now, state -> changed.set(true)); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(actualKey, equalTo(nextStepKey)); + assertThat(lifecycleState.getPhaseTime(), equalTo(now)); + assertThat(lifecycleState.getActionTime(), equalTo(now)); + assertThat(lifecycleState.getStepTime(), equalTo(now)); + task.clusterStateProcessed("source", clusterState, newState); + assertTrue(changed.get()); + } + + public void testExecuteDifferentCurrentStep() { + StepKey currentStepKey = new StepKey("current-phase", 
"current-action", "current-name"); + StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current"); + long now = randomNonNegativeLong(); + setStateToKey(notCurrentStepKey, now); + MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, null, () -> now, null); + ClusterState newState = task.execute(clusterState); + assertSame(newState, clusterState); + } + + public void testExecuteDifferentPolicy() { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + setStateToKey(currentStepKey, now); + setStatePolicy("not-" + policy); + MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, null, () -> now, null); + ClusterState newState = task.execute(clusterState); + assertSame(newState, clusterState); + } + + public void testExecuteSuccessfulMoveWithInvalidNextStep() { + long now = randomNonNegativeLong(); + List steps = lifecyclePolicy.toSteps(null); + StepKey currentStepKey = steps.get(0).getKey(); + StepKey invalidNextStep = new StepKey("next-invalid", "next-invalid", "next-invalid"); + + setStateToKey(currentStepKey, now); + + SetOnce changed = new SetOnce<>(); + MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, + invalidNextStep, () -> now, s -> changed.set(true)); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(actualKey, equalTo(invalidNextStep)); + assertThat(lifecycleState.getPhaseTime(), equalTo(now)); + assertThat(lifecycleState.getActionTime(), equalTo(now)); + assertThat(lifecycleState.getStepTime(), equalTo(now)); + task.clusterStateProcessed("source", clusterState, newState); + assertTrue(changed.get()); + } + + public void 
testOnFailure() { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + StepKey nextStepKey = new StepKey("next-phase", "next-action", "next-name"); + long now = randomNonNegativeLong(); + + setStateToKey(currentStepKey, now); + + MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey, () -> now, state -> {}); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + currentStepKey + + "] to step [" + nextStepKey + "].", exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + private void setStatePolicy(String policy) { + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .updateSettings(Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build(); + + } + private void setStateToKey(StepKey stepKey, long now) { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setPhaseTime(now); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setActionTime(now); + lifecycleState.setStep(stepKey.getName()); + lifecycleState.setStepTime(now); + lifecycleState.setPhaseDefinition("{\"actions\":{\"TEST_ACTION\":{}}}"); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + } +} diff --git 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java new file mode 100644 index 0000000000000..dccd12e15f114 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class OperationModeUpdateTaskTests extends ESTestCase { + + public void testExecute() { + assertMove(OperationMode.RUNNING, OperationMode.STOPPING); + assertMove(OperationMode.STOPPING, randomFrom(OperationMode.RUNNING, OperationMode.STOPPED)); + assertMove(OperationMode.STOPPED, OperationMode.RUNNING); + + OperationMode mode = randomFrom(OperationMode.values()); + assertNoMove(mode, mode); + assertNoMove(OperationMode.STOPPED, OperationMode.STOPPING); + assertNoMove(OperationMode.RUNNING, OperationMode.STOPPED); + } + + public void testExecuteWithEmptyMetadata() { + OperationMode requestedMode = OperationMode.STOPPING; + OperationMode newMode = executeUpdate(false, IndexLifecycleMetadata.EMPTY.getOperationMode(), + 
requestedMode, false); + assertThat(newMode, equalTo(requestedMode)); + + requestedMode = randomFrom(OperationMode.RUNNING, OperationMode.STOPPED); + newMode = executeUpdate(false, IndexLifecycleMetadata.EMPTY.getOperationMode(), + requestedMode, false); + assertThat(newMode, equalTo(OperationMode.RUNNING)); + } + + private void assertMove(OperationMode currentMode, OperationMode requestedMode) { + OperationMode newMode = executeUpdate(true, currentMode, requestedMode, false); + assertThat(newMode, equalTo(requestedMode)); + } + + private void assertNoMove(OperationMode currentMode, OperationMode requestedMode) { + OperationMode newMode = executeUpdate(true, currentMode, requestedMode, true); + assertThat(newMode, equalTo(currentMode)); + } + + private OperationMode executeUpdate(boolean metadataInstalled, OperationMode currentMode, OperationMode requestMode, + boolean assertSameClusterState) { + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + ImmutableOpenMap.Builder customsMapBuilder = ImmutableOpenMap.builder(); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()); + if (metadataInstalled) { + metaData.customs(customsMapBuilder.fPut(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata).build()); + } + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + OperationModeUpdateTask task = new OperationModeUpdateTask(requestMode); + ClusterState newState = task.execute(state); + if (assertSameClusterState) { + assertSame(state, newState); + } else { + assertThat(state, not(equalTo(newState))); + } + IndexLifecycleMetadata newMetaData = newState.metaData().custom(IndexLifecycleMetadata.TYPE); + assertThat(newMetaData.getPolicyMetadatas(), equalTo(indexLifecycleMetadata.getPolicyMetadatas())); + return newMetaData.getOperationMode(); + } +} diff --git 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java new file mode 100644 index 0000000000000..fe7fd1fca05d3 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats; + +import java.io.IOException; +import java.util.Arrays; + +public class PhaseStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected PhaseStats createTestInstance() { + return randomPhaseStats(); + } + + static PhaseStats randomPhaseStats() { + TimeValue minimumAge = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + String[] actionNames = generateRandomStringArray(10, 20, false); + return new PhaseStats(minimumAge, actionNames); + } + + @Override + protected PhaseStats mutateInstance(PhaseStats instance) throws IOException { + TimeValue minimumAge = instance.getAfter(); + String[] actionNames = instance.getActionNames(); + switch (between(0, 1)) { + case 0: + minimumAge = randomValueOtherThan(minimumAge, + () -> TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after")); + break; + case 1: + actionNames = Arrays.copyOf(actionNames, actionNames.length + 1); + actionNames[actionNames.length - 1] = randomAlphaOfLengthBetween(10, 20); + break; + 
default: + throw new AssertionError("Illegal randomisation branch"); + } + return new PhaseStats(minimumAge, actionNames); + } + + @Override + protected Reader instanceReader() { + return PhaseStats::new; + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java new file mode 100644 index 0000000000000..5ced745c2fb3f --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class PolicyStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected PolicyStats createTestInstance() { + return randomPolicyStats(); + } + + static PolicyStats randomPolicyStats() { + Map phaseStats = new HashMap<>(); + int size = randomIntBetween(0, 10); + for (int i = 0; i < size; i++) { + phaseStats.put(randomAlphaOfLength(10), PhaseStatsTests.randomPhaseStats()); + } + int numberIndicesManaged = randomIntBetween(0, 1000); + return new PolicyStats(phaseStats, numberIndicesManaged); + } + + @Override + protected PolicyStats mutateInstance(PolicyStats instance) throws IOException { + Map phaseStats = instance.getPhaseStats(); + int numberIndicesManaged = 
instance.getIndicesManaged(); + switch (between(0, 1)) { + case 0: + phaseStats = new HashMap<>(phaseStats); + phaseStats.put(randomAlphaOfLength(11), PhaseStatsTests.randomPhaseStats()); + break; + case 1: + numberIndicesManaged += randomIntBetween(1, 10); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new PolicyStats(phaseStats, numberIndicesManaged); + } + + @Override + protected Reader instanceReader() { + return PolicyStats::new; + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java new file mode 100644 index 0000000000000..611522a59b0d4 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java @@ -0,0 +1,417 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockStep; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import 
org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; + +public class PolicyStepsRegistryTests extends ESTestCase { + private static final Step.StepKey MOCK_STEP_KEY = new Step.StepKey("mock", "mock", "mock"); + private static final NamedXContentRegistry REGISTRY = new NamedXContentRegistry(new IndexLifecycle(Settings.EMPTY).getNamedXContent()); + + private IndexMetaData emptyMetaData(Index index) { + return IndexMetaData.builder(index.getName()).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + } + + public void testGetFirstStep() { + String policyName = randomAlphaOfLengthBetween(2, 10); + Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); + Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null); + Step actualFirstStep = registry.getFirstStep(policyName); + assertThat(actualFirstStep, sameInstance(expectedFirstStep)); + } + + public void testGetFirstStepUnknownPolicy() { + String policyName = randomAlphaOfLengthBetween(2, 10); + Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); + Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null); + Step 
actualFirstStep = registry.getFirstStep(policyName + "unknown"); + assertNull(actualFirstStep); + } + + public void testGetStep() { + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + String phaseName = randomFrom(policy.getPhases().keySet()); + Phase phase = policy.getPhases().get(phaseName); + PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); + String phaseJson = Strings.toString(pei); + LifecycleAction action = randomFrom(phase.getActions().values()); + Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhaseDefinition(phaseJson); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, "policy") + .build()) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + SortedMap metas = new TreeMap<>(); + metas.put("policy", policyMetadata); + PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client); + Step actualStep = registry.getStep(indexMetaData, step.getKey()); + assertThat(actualStep.getKey(), equalTo(step.getKey())); + } + + public void testGetStepErrorStep() { + Step.StepKey errorStepKey = new Step.StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), ErrorStep.NAME); + Step expectedStep = new ErrorStep(errorStepKey); + Index index = new Index("test", "uuid"); + Map> indexSteps = Collections.singletonMap(index, 
Collections.singletonList(expectedStep)); + PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null); + Step actualStep = registry.getStep(emptyMetaData(index), errorStepKey); + assertThat(actualStep, equalTo(expectedStep)); + } + + public void testGetStepUnknownPolicy() { + PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> registry.getStep(emptyMetaData(new Index("test", "uuid")), MOCK_STEP_KEY)); + assertThat(e.getMessage(), + containsString("failed to retrieve step {\"phase\":\"mock\",\"action\":\"mock\",\"name\":\"mock\"}" + + " as index [test] has no policy")); + } + + public void testGetStepForIndexWithNoPhaseGetsInitializationStep() { + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicy("policy"); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, "policy") + .build()) + .build(); + SortedMap metas = new TreeMap<>(); + metas.put("policy", policyMetadata); + PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client); + Step step = registry.getStep(indexMetaData, InitializePolicyContextStep.KEY); + assertNotNull(step); + } + + public void testGetStepUnknownStepKey() { + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); + 
LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + String phaseName = randomFrom(policy.getPhases().keySet()); + Phase phase = policy.getPhases().get(phaseName); + PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); + String phaseJson = Strings.toString(pei); + LifecycleAction action = randomFrom(phase.getActions().values()); + Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhaseDefinition(phaseJson); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, "policy") + .build()) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + SortedMap metas = new TreeMap<>(); + metas.put("policy", policyMetadata); + PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client); + Step actualStep = registry.getStep(indexMetaData, + new Step.StepKey(step.getKey().getPhase(), step.getKey().getAction(), step.getKey().getName() + "-bad")); + assertNull(actualStep); + } + + public void testUpdateFromNothingToSomethingToNothing() throws Exception { + Index index = new Index("test", "uuid"); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + String policyName = randomAlphaOfLength(5); + LifecyclePolicy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); + logger.info("--> policy: {}", newPolicy); + List policySteps = newPolicy.toSteps(client); + Map headers = new HashMap<>(); + if (randomBoolean()) { + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + 
headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + Map policyMap = Collections.singletonMap(newPolicy.getName(), + new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong())); + IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase("new"); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .put(IndexMetaData.builder("test") + .settings(Settings.builder() + .put("index.uuid", "uuid") + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT.id) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())) + .build(); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + metaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + logger.info("--> metadata: {}", Strings.toString(builder)); + } + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + // start with empty registry + PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client); + + // add new policy + registry.update(currentState); + + assertThat(registry.getFirstStep(newPolicy.getName()), equalTo(policySteps.get(0))); + assertThat(registry.getLifecyclePolicyMap().size(), 
equalTo(1)); + assertNotNull(registry.getLifecyclePolicyMap().get(newPolicy.getName())); + assertThat(registry.getLifecyclePolicyMap().get(newPolicy.getName()).getHeaders(), equalTo(headers)); + assertThat(registry.getFirstStepMap().size(), equalTo(1)); + assertThat(registry.getStepMap().size(), equalTo(1)); + Map registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName()); + assertThat(registeredStepsForPolicy.size(), equalTo(policySteps.size())); + for (Step step : policySteps) { + LifecycleExecutionState.Builder newIndexState = LifecycleExecutionState.builder(); + newIndexState.setPhase(step.getKey().getPhase()); + currentState = ClusterState.builder(currentState) + .metaData(MetaData.builder(currentState.metaData()) + .put(IndexMetaData.builder(currentState.metaData().index("test")) + .settings(Settings.builder().put(currentState.metaData().index("test").getSettings())) + .putCustom(ILM_CUSTOM_METADATA_KEY, newIndexState.build().asMap()))) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + registry.update(currentState); + assertThat(registeredStepsForPolicy.get(step.getKey()), equalTo(step)); + assertThat(registry.getStep(metaData.index(index), step.getKey()), equalTo(step)); + } + + Map registryPolicyMap = registry.getLifecyclePolicyMap(); + Map registryFirstStepMap = registry.getFirstStepMap(); + Map> registryStepMap = registry.getStepMap(); + registry.update(currentState); + assertThat(registry.getLifecyclePolicyMap(), equalTo(registryPolicyMap)); + assertThat(registry.getFirstStepMap(), equalTo(registryFirstStepMap)); + assertThat(registry.getStepMap(), equalTo(registryStepMap)); + + // remove policy + lifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + currentState = ClusterState.builder(currentState) + .metaData( + MetaData.builder(metaData) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)).build(); + 
registry.update(currentState); + assertTrue(registry.getLifecyclePolicyMap().isEmpty()); + assertTrue(registry.getFirstStepMap().isEmpty()); + assertTrue(registry.getStepMap().isEmpty()); + } + + public void testUpdateChangedPolicy() { + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + String policyName = randomAlphaOfLengthBetween(5, 10); + LifecyclePolicy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); + Map headers = new HashMap<>(); + if (randomBoolean()) { + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + Map policyMap = Collections.singletonMap(newPolicy.getName(), + new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong())); + IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .build(); + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client); + // add new policy + registry.update(currentState); + + // swap out policy + newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); + lifecycleMetadata = new IndexLifecycleMetadata(Collections.singletonMap(policyName, + new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), + randomNonNegativeLong(), 
randomNonNegativeLong())), OperationMode.RUNNING); + currentState = ClusterState.builder(currentState) + .metaData(MetaData.builder(metaData).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)).build(); + registry.update(currentState); + // TODO(talevy): assert changes... right now we do not support updates to policies. will require internal cleanup + } + + public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Exception { + Index index = new Index("test", "uuid"); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + String policyName = randomAlphaOfLength(5); + Map actions = new HashMap<>(); + actions.put("shrink", new ShrinkAction(1)); + Map phases = new HashMap<>(); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions); + PhaseExecutionInfo pei = new PhaseExecutionInfo(policyName, warmPhase, 1, randomNonNegativeLong()); + String phaseJson = Strings.toString(pei); + phases.put("warm", new Phase("warm", TimeValue.ZERO, actions)); + LifecyclePolicy newPolicy = new LifecyclePolicy(policyName, phases); + // Modify the policy + actions = new HashMap<>(); + actions.put("shrink", new ShrinkAction(2)); + phases = new HashMap<>(); + phases.put("warm", new Phase("warm", TimeValue.ZERO, actions)); + LifecyclePolicy updatedPolicy = new LifecyclePolicy(policyName, phases); + logger.info("--> policy: {}", newPolicy); + logger.info("--> updated policy: {}", updatedPolicy); + List policySteps = newPolicy.toSteps(client); + Map headers = new HashMap<>(); + if (randomBoolean()) { + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + Map policyMap = Collections.singletonMap(newPolicy.getName(), + new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong())); + IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + 
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase("warm"); + lifecycleState.setPhaseDefinition(phaseJson); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .put(IndexMetaData.builder("test") + .settings(Settings.builder() + .put("index.uuid", "uuid") + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT.id) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())) + .build(); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + metaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + logger.info("--> metadata: {}", Strings.toString(builder)); + } + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + // start with empty registry + PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client); + + // add new policy + registry.update(currentState); + + Map registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName()); + Step shrinkStep = registeredStepsForPolicy.entrySet().stream() + .filter(e -> e.getKey().getPhase().equals("warm") && e.getKey().getName().equals("shrink")) + .findFirst().get().getValue(); + Step gotStep = registry.getStep(metaData.index(index), shrinkStep.getKey()); + assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(1)); + assertThat(((ShrinkStep) 
gotStep).getNumberOfShards(), equalTo(1)); + + // Update the policy with the new policy, but keep the phase the same + policyMap = Collections.singletonMap(updatedPolicy.getName(), new LifecyclePolicyMetadata(updatedPolicy, headers, + randomNonNegativeLong(), randomNonNegativeLong())); + lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + metaData = MetaData.builder(metaData) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .build(); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + metaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + logger.info("--> metadata: {}", Strings.toString(builder)); + } + currentState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + + // Update the policies + registry.update(currentState); + + registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName()); + shrinkStep = registeredStepsForPolicy.entrySet().stream() + .filter(e -> e.getKey().getPhase().equals("warm") && e.getKey().getName().equals("shrink")) + .findFirst().get().getValue(); + gotStep = registry.getStep(metaData.index(index), shrinkStep.getKey()); + assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(2)); + assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1)); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java new file mode 100644 index 0000000000000..85084223481c3 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +public class RandomStepInfo implements ToXContentObject { + + private final String key; + private final String value; + + public RandomStepInfo(Supplier randomStringSupplier) { + this.key = randomStringSupplier.get(); + this.value = randomStringSupplier.get(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(key, value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RandomStepInfo other = (RandomStepInfo) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} \ No newline at end of file diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java new file mode 100644 index 0000000000000..a8b16d3ecfdf9 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; + +import java.io.IOException; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class SetStepInfoUpdateTaskTests extends ESTestCase { + + String policy; + ClusterState clusterState; + Index index; + + @Before + public void setupClusterState() { + policy = randomAlphaOfLength(10); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + clusterState = 
ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + } + + public void testExecuteSuccessfullySet() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + ToXContentObject stepInfo = getRandomStepInfo(); + setStateToKey(currentStepKey); + + SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(actualKey, equalTo(currentStepKey)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepTime(), nullValue()); + + XContentBuilder infoXContentBuilder = JsonXContent.contentBuilder(); + stepInfo.toXContent(infoXContentBuilder, ToXContent.EMPTY_PARAMS); + String expectedCauseValue = BytesReference.bytes(infoXContentBuilder).utf8ToString(); + assertThat(lifecycleState.getStepInfo(), equalTo(expectedCauseValue)); + } + + private ToXContentObject getRandomStepInfo() { + String key = randomAlphaOfLength(20); + String value = randomAlphaOfLength(20); + return (b, p) -> { + b.startObject(); + b.field(key, value); + b.endObject(); + return b; + }; + } + + public void testExecuteNoopDifferentStep() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current"); + ToXContentObject stepInfo = getRandomStepInfo(); + setStateToKey(notCurrentStepKey); + SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void 
testExecuteNoopDifferentPolicy() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + ToXContentObject stepInfo = getRandomStepInfo(); + setStateToKey(currentStepKey); + setStatePolicy("not-" + policy); + SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void testOnFailure() { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + ToXContentObject stepInfo = getRandomStepInfo(); + + setStateToKey(currentStepKey); + + SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to set step info for step [" + + currentStepKey + "].", exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + private void setStatePolicy(String policy) { + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .updateSettings(Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build(); + + } + private void setStateToKey(StepKey stepKey) { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setStep(stepKey.getName()); + + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + 
.put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java new file mode 100644 index 0000000000000..f34a2aa458d16 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.concurrent.TimeUnit; + +public class TimeValueScheduleTests extends ESTestCase { + + public TimeValueSchedule createRandomInstance() { + return new TimeValueSchedule(createRandomTimeValue()); + } + + private TimeValue createRandomTimeValue() { + return new TimeValue(randomLongBetween(1, 10000), randomFrom(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS)); + } + + public void testHascodeAndEquals() { + for (int i = 0; i < 20; i++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createRandomInstance(), + instance -> new TimeValueSchedule(instance.getInterval()), + instance -> new TimeValueSchedule(randomValueOtherThan(instance.getInterval(), () -> createRandomTimeValue()))); + } + } + + public void testNextScheduledTimeFirstTriggerNotReached() { + long start = randomNonNegativeLong(); + TimeValue interval = createRandomTimeValue(); + long triggerTime = start + interval.millis(); + long now = start + randomLongBetween(0, interval.millis() 
- 1); + TimeValueSchedule schedule = new TimeValueSchedule(interval); + assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now)); + } + + public void testNextScheduledTimeAtFirstInterval() { + long start = randomNonNegativeLong(); + TimeValue interval = createRandomTimeValue(); + long triggerTime = start + 2 * interval.millis(); + long now = start + interval.millis(); + TimeValueSchedule schedule = new TimeValueSchedule(interval); + assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now)); + } + + public void testNextScheduledTimeAtStartTime() { + long start = randomNonNegativeLong(); + TimeValue interval = createRandomTimeValue(); + long triggerTime = start + interval.millis(); + TimeValueSchedule schedule = new TimeValueSchedule(interval); + assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, start)); + } + + public void testNextScheduledTimeAfterFirstTrigger() { + long start = randomNonNegativeLong(); + TimeValue interval = createRandomTimeValue(); + long numberIntervalsPassed = randomLongBetween(0, 10000); + long triggerTime = start + (numberIntervalsPassed + 1) * interval.millis(); + long now = start + + randomLongBetween(numberIntervalsPassed * interval.millis(), (numberIntervalsPassed + 1) * interval.millis() - 1); + TimeValueSchedule schedule = new TimeValueSchedule(interval); + assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now)); + } + + public void testInvalidInterval() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new TimeValueSchedule(new TimeValue(0))); + assertEquals("interval must be greater than 0 milliseconds", exception.getMessage()); + } +} diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index cc5a2cd68dde5..b47016c134459 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -7,6 +7,7 @@ dependencies { } 
integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.watcher.enabled', 'false' diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle index a24036651d504..2aa5d47acef0d 100644 --- a/x-pack/plugin/ml/qa/disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -7,6 +7,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.ml.enabled', 'false' numNodes = 1 diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index 88ca4dd118ea4..f856c3d4c5ff4 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -7,6 +7,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 67e21aadcbceb..075a9b889b041 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -24,6 +24,7 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.PERSISTENT_TASK_ORIGIN; @@ -111,6 +112,7 @@ public 
static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadC case DEPRECATION_ORIGIN: case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: + case INDEX_LIFECYCLE_ORIGIN: securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT); break; default: diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index 9c9f2b1b1a42a..66b1e9d9c2ad4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -140,7 +140,7 @@ public void testSwitchAndExecuteXpackUser() throws Exception { threadContext.putHeader(headerName, headerValue); threadContext.putTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.DEPRECATION_ORIGIN, - ClientHelper.MONITORING_ORIGIN, ClientHelper.PERSISTENT_TASK_ORIGIN)); + ClientHelper.MONITORING_ORIGIN, ClientHelper.PERSISTENT_TASK_ORIGIN, ClientHelper.INDEX_LIFECYCLE_ORIGIN)); AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, consumer); diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index 2f7faad13ab35..397618f9d8e64 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.test.rest; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.http.HttpStatus; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.Request; diff 
--git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json new file mode 100644 index 0000000000000..4deaaaffc15b4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json @@ -0,0 +1,19 @@ +{ + "ilm.delete_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of the index lifecycle policy" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json new file mode 100644 index 0000000000000..de1b0deb5b9e0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -0,0 +1,24 @@ +{ + "ilm.explain_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/{index}/_ilm/explain", + "paths": ["/{index}/_ilm/explain"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index to explain" + } + }, + "params": { + "human": { + "type" : "boolean", + "default" : "false", + "description" : "Return data such as dates in a human readable format" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json new file mode 100644 index 0000000000000..9fbabb964792e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json @@ -0,0 +1,19 @@ +{ + "ilm.get_lifecycle": { + "documentation": 
"http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}", "/_ilm/policy"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of the index lifecycle policy" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json new file mode 100644 index 0000000000000..1bf2c1adf0263 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json @@ -0,0 +1,13 @@ +{ + "ilm.get_status": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/_ilm/status", + "paths": ["/_ilm/status"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json new file mode 100644 index 0000000000000..ca3f1e76fb256 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json @@ -0,0 +1,21 @@ +{ + "ilm.move_to_step": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/move/{index}", + "paths": ["/_ilm/move/{index}"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index whose lifecycle step is to change" + } + }, + "params": { + } + }, + "body": { + "description": "The new lifecycle step to move to" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json new file mode 100644 index 0000000000000..f23b3b3eb1537 --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json @@ -0,0 +1,21 @@ +{ + "ilm.put_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "PUT" ], + "url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of the index lifecycle policy" + } + }, + "params": { + } + }, + "body": { + "description": "The lifecycle policy definition to register" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json new file mode 100644 index 0000000000000..72834eb380d34 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json @@ -0,0 +1,19 @@ +{ + "ilm.remove_policy": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "DELETE" ], + "url": { + "path": "/{index}/_ilm", + "paths": ["/{index}/_ilm", "/_ilm"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index to remove the policy from" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json new file mode 100644 index 0000000000000..af3e7fd43eb56 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json @@ -0,0 +1,19 @@ +{ + "ilm.retry": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ilm/retry", + "paths": ["/{index}/_ilm/retry"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the indices (comma-separated) whose failed lifecycle step is to be retried" + } + }, + "params": { + } + }, + "body": 
null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json new file mode 100644 index 0000000000000..0f2c6e347c452 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json @@ -0,0 +1,13 @@ +{ + "ilm.start": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/start", + "paths": ["/_ilm/start"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json new file mode 100644 index 0000000000000..f4d6526765971 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json @@ -0,0 +1,13 @@ +{ + "ilm.stop": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/stop", + "paths": ["/_ilm/stop"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml index ba75c7bffc80e..428099687d7b5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -14,11 +14,12 @@ - match: { nodes.$master.modules.15.name: x-pack-core } - match: { nodes.$master.modules.16.name: x-pack-deprecation } - match: { nodes.$master.modules.17.name: x-pack-graph } - - match: { nodes.$master.modules.18.name: x-pack-logstash } - - match: { nodes.$master.modules.19.name: x-pack-ml } - - match: { nodes.$master.modules.20.name: x-pack-monitoring } - - match: { nodes.$master.modules.21.name: x-pack-rollup } - - match: { nodes.$master.modules.22.name: x-pack-security } 
- - match: { nodes.$master.modules.23.name: x-pack-sql } - - match: { nodes.$master.modules.24.name: x-pack-upgrade } - - match: { nodes.$master.modules.25.name: x-pack-watcher } + - match: { nodes.$master.modules.18.name: x-pack-ilm } + - match: { nodes.$master.modules.19.name: x-pack-logstash } + - match: { nodes.$master.modules.20.name: x-pack-ml } + - match: { nodes.$master.modules.21.name: x-pack-monitoring } + - match: { nodes.$master.modules.22.name: x-pack-rollup } + - match: { nodes.$master.modules.23.name: x-pack-security } + - match: { nodes.$master.modules.24.name: x-pack-sql } + - match: { nodes.$master.modules.25.name: x-pack-upgrade } + - match: { nodes.$master.modules.26.name: x-pack-watcher } diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle index 8af672fe92aee..126e3834bab4a 100644 --- a/x-pack/qa/audit-tests/build.gradle +++ b/x-pack/qa/audit-tests/build.gradle @@ -17,6 +17,7 @@ project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) integTestCluster { distribution 'zip' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index 7f2706a773aa9..e2c2f585a8246 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -23,6 +23,7 @@ integTestRunner { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index e79490df829e3..c31b2c0ad1d5e 100644 --- a/x-pack/qa/multi-cluster-search-security/build.gradle +++ 
b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -16,6 +16,7 @@ remoteClusterTestCluster { numNodes = 2 clusterName = 'remote-cluster' setting 'cluster.remote.connect', false + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle index 4369287caba32..243a6f40438cc 100644 --- a/x-pack/qa/multi-node/build.gradle +++ b/x-pack/qa/multi-node/build.gradle @@ -8,6 +8,7 @@ dependencies { integTestCluster { numNodes = 2 clusterName = 'multi-node' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 97c0e8e17fee7..ea2b7d6990622 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -12,6 +12,7 @@ dependencies { integTestCluster { // Whitelist reindexing from the local node so we can test it. 
setting 'reindex.remote.whitelist', '127.0.0.1:*' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index a1430965339c3..3b72674ed0751 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -43,6 +43,11 @@ protected boolean preserveRollupJobsUponCompletion() { return true; } + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } + enum ClusterType { OLD, MIXED, diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 12add3d7feaa8..5a9c866058dc2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -64,6 +64,11 @@ protected boolean preserveRollupJobsUponCompletion() { return true; } + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle index e676e55a152d4..556e36e51467f 100644 --- a/x-pack/qa/security-client-tests/build.gradle +++ b/x-pack/qa/security-client-tests/build.gradle @@ -19,6 +19,7 @@ integTestRunner { } integTestCluster { + setting 
'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index aef4fc33f6abe..c75573431fb50 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -27,6 +27,7 @@ integTestCluster { setting 'xpack.security.authc.realms.native.type', 'native' setting 'xpack.security.authc.realms.native.order', '2' setting 'xpack.security.enabled', 'true' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle index 7813ff3d3d56c..57be337f634f2 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -8,6 +8,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'true' setting 'xpack.watcher.enabled', 'true' setting 'xpack.security.enabled', 'false' diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 4f338d07fb531..76c887ab04a2b 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -65,12 +65,14 @@ integTestCluster { setting 'xpack.monitoring.exporters._http.ssl.verification_mode', 'full' setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.security.http.ssl.enabled', 'true' setting 'xpack.security.http.ssl.key', 'testnode.pem' setting 'xpack.security.http.ssl.certificate', 'testnode.crt' 
keystoreSetting 'xpack.security.http.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' // copy keystores, keys and certificates into config/ extraConfigFile nodeKeystore.name, nodeKeystore diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index 50e217b28b270..f5007e5b0910b 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -14,6 +14,7 @@ task copyWatcherRestTests(type: Copy) { integTestCluster { dependsOn copyWatcherRestTests + setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index 5923afcacad94..fc22fe9aa065f 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -14,6 +14,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false'