From 82bae15ec04a64c70d3affd66d0c372db0ff87e0 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Wed, 5 Jun 2019 19:03:05 +0300
Subject: [PATCH 1/3] [FEATURE][ML] Ensure data extractor is not leaking scroll contexts

---
 .../extractor/DataFrameDataExtractor.java     |  50 +--
 .../DataFrameDataExtractorTests.java          | 341 ++++++++++++++++++
 2 files changed, 369 insertions(+), 22 deletions(-)
 create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
index a45185ebe213f..7b8452f635f9e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
@@ -7,6 +7,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.search.ClearScrollAction;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.SearchAction;
@@ -20,7 +21,6 @@
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.xpack.core.ClientHelper;
-import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils;
 import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField;
 import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsFields;
 
@@ -34,6 +34,7 @@
 import java.util.Objects;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 /**
@@ -91,9 +92,28 @@ public Optional<List<Row>> next() throws IOException {
 
     protected List<Row> initScroll() throws IOException {
         LOGGER.debug("[{}] Initializing scroll", context.jobId);
-        SearchResponse searchResponse = executeSearchRequest(buildSearchRequest());
-        LOGGER.debug("[{}] Search response was obtained", context.jobId);
-        return processSearchResponse(searchResponse);
+        return tryRequestWithSearchResponse(() -> executeSearchRequest(buildSearchRequest()));
+    }
+
+    private List<Row> tryRequestWithSearchResponse(Supplier<SearchResponse> request) throws IOException {
+        try {
+            // We've set allow_partial_search_results to false which means if something
+            // goes wrong the request will throw.
+            SearchResponse searchResponse = request.get();
+            LOGGER.debug("[{}] Search response was obtained", context.jobId);
+
+            // Request was successful so we can restore the flag to retry if a future failure occurs
+            searchHasShardFailure = false;
+
+            return processSearchResponse(searchResponse);
+        } catch (Exception e) {
+            if (searchHasShardFailure) {
+                throw e;
+            }
+            LOGGER.warn(new ParameterizedMessage("[{}] Search resulted in failure; retrying once", context.jobId), e);
+            markScrollAsErrored();
+            return initScroll();
+        }
     }
 
     protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
@@ -103,6 +123,8 @@ protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequest
     private SearchRequestBuilder buildSearchRequest() {
         SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE)
                 .setScroll(SCROLL_TIMEOUT)
+                // This ensures the search throws if there are failures and the scroll context gets cleared automatically
+                .setAllowPartialSearchResults(false)
                 .addSort(DataFrameAnalyticsFields.ID, SortOrder.ASC)
                 .setIndices(context.indices)
                 .setSize(context.scrollSize)
@@ -117,14 +139,6 @@ private SearchRequestBuilder buildSearchRequest() {
     }
 
     private List<Row> processSearchResponse(SearchResponse searchResponse) throws IOException {
-
-        if (searchResponse.getFailedShards() > 0 && searchHasShardFailure == false) {
-            LOGGER.debug("[{}] Resetting scroll search after shard failure", context.jobId);
-            markScrollAsErrored();
-            return initScroll();
-        }
-
-        ExtractorUtils.checkSearchWasSuccessful(context.jobId, searchResponse);
         scrollId = searchResponse.getScrollId();
         if (searchResponse.getHits().getHits().length == 0) {
             hasNext = false;
@@ -143,7 +157,6 @@ private List<Row> processSearchResponse(SearchResponse searchResponse) throws IO
             rows.add(createRow(hit));
         }
         return rows;
-
     }
 
     private Row createRow(SearchHit hit) {
@@ -163,15 +176,13 @@ private Row createRow(SearchHit hit) {
 
     private List<Row> continueScroll() throws IOException {
         LOGGER.debug("[{}] Continuing scroll with id [{}]", context.jobId, scrollId);
-        SearchResponse searchResponse = executeSearchScrollRequest(scrollId);
-        LOGGER.debug("[{}] Search response was obtained", context.jobId);
-        return processSearchResponse(searchResponse);
+        return tryRequestWithSearchResponse(() -> executeSearchScrollRequest(scrollId));
     }
 
     private void markScrollAsErrored() {
         // This could be a transient error with the scroll Id.
         // Reinitialise the scroll and try again but only once.
-        resetScroll();
+        scrollId = null;
         searchHasShardFailure = true;
     }
 
@@ -183,11 +194,6 @@ protected SearchResponse executeSearchScrollRequest(String scrollId) {
                 .get());
     }
 
-    private void resetScroll() {
-        clearScroll(scrollId);
-        scrollId = null;
-    }
-
     private void clearScroll(String scrollId) {
         if (scrollId != null) {
             ClearScrollRequest request = new ClearScrollRequest();
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
new file mode 100644
index 0000000000000..f6547e1e6e583
--- /dev/null
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.dataframe.extractor;
+
+import org.apache.lucene.search.TotalHits;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.search.ClearScrollAction;
+import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.document.DocumentField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField;
+import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedFields;
+import org.junit.Before;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Queue;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DataFrameDataExtractorTests extends ESTestCase {
+
+    private static final String JOB_ID = "foo";
+
+    private Client client;
+    private List<String> indices;
+    private ExtractedFields extractedFields;
+    private QueryBuilder query;
+    private int scrollSize;
+    private Map<String, String> headers;
+    private ArgumentCaptor<ClearScrollRequest> capturedClearScrollRequests;
+    private ActionFuture<ClearScrollResponse> clearScrollFuture;
+
+    @Before
+    public void setUpTests() {
+        ThreadPool threadPool = mock(ThreadPool.class);
+        when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
+        client = mock(Client.class);
+        when(client.threadPool()).thenReturn(threadPool);
+
+        indices = Arrays.asList("index-1", "index-2");
+        query = QueryBuilders.matchAllQuery();
+        extractedFields = new ExtractedFields(Arrays.asList(
+            ExtractedField.newField("field_1", ExtractedField.ExtractionMethod.DOC_VALUE),
+            ExtractedField.newField("field_2", ExtractedField.ExtractionMethod.DOC_VALUE)));
+        scrollSize = 1000;
+        headers = Collections.emptyMap();
+
+        clearScrollFuture = mock(ActionFuture.class);
+        capturedClearScrollRequests = ArgumentCaptor.forClass(ClearScrollRequest.class);
+        when(client.execute(same(ClearScrollAction.INSTANCE), capturedClearScrollRequests.capture())).thenReturn(clearScrollFuture);
+    }
+
+    public void testTwoPageExtraction() throws IOException {
+        TestExtractor dataExtractor = createExtractor(true);
+
+        // First batch
+        SearchResponse response1 = createSearchResponse(Arrays.asList(1_1, 1_2, 1_3), Arrays.asList(2_1, 2_2, 2_3));
+        dataExtractor.setNextResponse(response1);
+
+        // Second batch
+        SearchResponse response2 = createSearchResponse(Arrays.asList(3_1), Arrays.asList(4_1));
+        dataExtractor.setNextResponse(response2);
+
+        // Third batch is empty
+        SearchResponse lastAndEmptyResponse = createEmptySearchResponse();
+        dataExtractor.setNextResponse(lastAndEmptyResponse);
+
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // First batch
+        Optional<List<DataFrameDataExtractor.Row>> rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(3));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"}));
+        assertThat(rows.get().get(1).getValues(), equalTo(new String[] {"12", "22"}));
+        assertThat(rows.get().get(2).getValues(), equalTo(new String[] {"13", "23"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // Second batch
+        rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(1));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"31", "41"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // Third batch should return empty
+        rows = dataExtractor.next();
+        assertThat(rows.isEmpty(), is(true));
+        assertThat(dataExtractor.hasNext(), is(false));
+
+        // Now let's assert we're sending the expected search request
+        assertThat(dataExtractor.capturedSearchRequests.size(), equalTo(1));
+        String searchRequest = dataExtractor.capturedSearchRequests.get(0).request().toString().replaceAll("\\s", "");
+        assertThat(searchRequest, containsString("allowPartialSearchResults=false"));
+        assertThat(searchRequest, containsString("indices=[index-1,index-2]"));
+        assertThat(searchRequest, containsString("\"size\":1000"));
+        assertThat(searchRequest, containsString("\"query\":{\"match_all\":{\"boost\":1.0}}"));
+        assertThat(searchRequest, containsString("\"docvalue_fields\":[{\"field\":\"field_1\"},{\"field\":\"field_2\"}]"));
+        assertThat(searchRequest, containsString("\"_source\":{\"includes\":[],\"excludes\":[]}"));
+        assertThat(searchRequest, containsString("\"sort\":[{\"_id_copy\":{\"order\":\"asc\"}}]"));
+
+        // Check continue scroll requests had correct ids
+        assertThat(dataExtractor.capturedContinueScrollIds.size(), equalTo(2));
+        assertThat(dataExtractor.capturedContinueScrollIds.get(0), equalTo(response1.getScrollId()));
+        assertThat(dataExtractor.capturedContinueScrollIds.get(1), equalTo(response2.getScrollId()));
+
+        // Check we cleared the scroll with the latest scroll id
+        List<String> capturedClearScrollRequests = getCapturedClearScrollIds();
+        assertThat(capturedClearScrollRequests.size(), equalTo(1));
+        assertThat(capturedClearScrollRequests.get(0), equalTo(lastAndEmptyResponse.getScrollId()));
+    }
+
+    public void testRecoveryFromErrorOnSearchAfterRetry() throws IOException {
+        TestExtractor dataExtractor = createExtractor(true);
+
+        // First search will fail
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+
+        // Next one will succeed
+        SearchResponse response = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1));
+        dataExtractor.setNextResponse(response);
+
+        // Last one
+        SearchResponse lastAndEmptyResponse = createEmptySearchResponse();
+        dataExtractor.setNextResponse(lastAndEmptyResponse);
+
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // First batch expected as normal since we'll retry after the error
+        Optional<List<DataFrameDataExtractor.Row>> rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(1));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // Next batch should return empty
+        rows = dataExtractor.next();
+        assertThat(rows.isEmpty(), is(true));
+        assertThat(dataExtractor.hasNext(), is(false));
+
+        // Check we cleared the scroll with the latest scroll id
+        List<String> capturedClearScrollRequests = getCapturedClearScrollIds();
+        assertThat(capturedClearScrollRequests.size(), equalTo(1));
+        assertThat(capturedClearScrollRequests.get(0), equalTo(lastAndEmptyResponse.getScrollId()));
+    }
+
+    public void testErrorOnSearchTwiceLeadsToFailure() {
+        TestExtractor dataExtractor = createExtractor(true);
+
+        // First search will fail
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+        // Next one fails again
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        expectThrows(RuntimeException.class, () -> dataExtractor.next());
+    }
+
+    public void testRecoveryFromErrorOnContinueScrollAfterRetry() throws IOException {
+        TestExtractor dataExtractor = createExtractor(true);
+
+        // Search will succeed
+        SearchResponse response1 = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1));
+        dataExtractor.setNextResponse(response1);
+
+        // But the first continue scroll fails
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+
+        // The next one succeeds and we shall recover
+        SearchResponse response2 = createSearchResponse(Arrays.asList(1_2), Arrays.asList(2_2));
+        dataExtractor.setNextResponse(response2);
+
+        // Last one
+        SearchResponse lastAndEmptyResponse = createEmptySearchResponse();
+        dataExtractor.setNextResponse(lastAndEmptyResponse);
+
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // First batch expected as normal since we'll retry after the error
+        Optional<List<DataFrameDataExtractor.Row>> rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(1));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // We get second batch as we retried after the error
+        rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(1));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"12", "22"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // Next batch should return empty
+        rows = dataExtractor.next();
+        assertThat(rows.isEmpty(), is(true));
+        assertThat(dataExtractor.hasNext(), is(false));
+
+        // Notice we've done two searches and two continues here
+        assertThat(dataExtractor.capturedSearchRequests.size(), equalTo(2));
+        assertThat(dataExtractor.capturedContinueScrollIds.size(), equalTo(2));
+
+        // Check we cleared the scroll with the latest scroll id
+        List<String> capturedClearScrollRequests = getCapturedClearScrollIds();
+        assertThat(capturedClearScrollRequests.size(), equalTo(1));
+        assertThat(capturedClearScrollRequests.get(0), equalTo(lastAndEmptyResponse.getScrollId()));
+    }
+
+    public void testErrorOnContinueScrollTwiceLeadsToFailure() throws IOException {
+        TestExtractor dataExtractor = createExtractor(true);
+
+        // Search will succeed
+        SearchResponse response1 = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1));
+        dataExtractor.setNextResponse(response1);
+
+        // But the first continue scroll fails
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+        // As well as the second
+        dataExtractor.setNextResponse(createResponseWithShardFailures());
+
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // First batch expected as normal since we'll retry after the error
+        Optional<List<DataFrameDataExtractor.Row>> rows = dataExtractor.next();
+        assertThat(rows.isPresent(), is(true));
+        assertThat(rows.get().size(), equalTo(1));
+        assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"}));
+        assertThat(dataExtractor.hasNext(), is(true));
+
+        // The second consecutive failure is not retried so the error should surface
+        expectThrows(RuntimeException.class, () -> dataExtractor.next());
+    }
+
+    private TestExtractor createExtractor(boolean includeSource) {
+        DataFrameDataExtractorContext context = new DataFrameDataExtractorContext(
+            JOB_ID, extractedFields, indices, query, scrollSize, headers, includeSource);
+        return new TestExtractor(client, context);
+    }
+
+    private SearchResponse createSearchResponse(List<Number> field1Values, List<Number> field2Values) {
+        assertThat(field1Values.size(), equalTo(field2Values.size()));
+        SearchResponse searchResponse = mock(SearchResponse.class);
+        when(searchResponse.getScrollId()).thenReturn(randomAlphaOfLength(1000));
+        List<SearchHit> hits = new ArrayList<>();
+        for (int i = 0; i < field1Values.size(); i++) {
+            SearchHit hit = new SearchHit(randomInt());
+            Map<String, DocumentField> fields = new HashMap<>();
+            fields.put("field_1", new DocumentField("field_1", Collections.singletonList(field1Values.get(i))));
+            fields.put("field_2", new DocumentField("field_2", Collections.singletonList(field2Values.get(i))));
+            hit.fields(fields);
+            hits.add(hit);
+        }
+        SearchHits searchHits = new SearchHits(hits.toArray(new SearchHit[0]), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 1);
+        when(searchResponse.getHits()).thenReturn(searchHits);
+        return searchResponse;
+    }
+
+    private SearchResponse createEmptySearchResponse() {
+        return createSearchResponse(Collections.emptyList(), Collections.emptyList());
+    }
+
+    private SearchResponse createResponseWithShardFailures() {
+        SearchResponse searchResponse = mock(SearchResponse.class);
+        when(searchResponse.status()).thenReturn(RestStatus.OK);
+        when(searchResponse.getShardFailures()).thenReturn(
+            new ShardSearchFailure[] { new ShardSearchFailure(new RuntimeException("shard failed"))});
+        when(searchResponse.getFailedShards()).thenReturn(1);
+        when(searchResponse.getScrollId()).thenReturn(randomAlphaOfLength(1000));
+        return searchResponse;
+    }
+
+    private List<String> getCapturedClearScrollIds() {
+        return capturedClearScrollRequests.getAllValues().stream().map(r -> r.getScrollIds().get(0)).collect(Collectors.toList());
+    }
+
+    private static class TestExtractor extends DataFrameDataExtractor {
+
+        private Queue<SearchResponse> responses = new LinkedList<>();
+        private List<SearchRequestBuilder> capturedSearchRequests = new ArrayList<>();
+        private List<String> capturedContinueScrollIds = new ArrayList<>();
+
+        TestExtractor(Client client, DataFrameDataExtractorContext context) {
+            super(client, context);
+        }
+
+        void setNextResponse(SearchResponse searchResponse) {
+            responses.add(searchResponse);
+        }
+
+        @Override
+        protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
+            capturedSearchRequests.add(searchRequestBuilder);
+            SearchResponse searchResponse = responses.remove();
+            if (searchResponse.getShardFailures() != null) {
+                throw new RuntimeException(searchResponse.getShardFailures()[0].getCause());
+            }
+            return searchResponse;
+        }
+
+        @Override
+        protected SearchResponse executeSearchScrollRequest(String scrollId) {
+            capturedContinueScrollIds.add(scrollId);
+            SearchResponse searchResponse = responses.remove();
+            if (searchResponse.getShardFailures() != null) {
+                throw new RuntimeException(searchResponse.getShardFailures()[0].getCause());
+            }
+            return searchResponse;
+        }
+    }
+}

From 575a5ef69df6d584ab972d2a914b55725dd22a29 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 7 Jun 2019 15:23:36 +0300
Subject: [PATCH 2/3] Try stabilising some flaky tests

---
 .../org/elasticsearch/client/MachineLearningIT.java | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index a5055b7e4f05d..60b4314c7b3d3 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -1365,7 +1365,8 @@ public void testStartDataFrameAnalyticsConfig() throws Exception {
         String sourceIndex = "start-test-source-index";
         String destIndex = "start-test-dest-index";
         createIndex(sourceIndex, defaultMappingForTest());
-        highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000), RequestOptions.DEFAULT);
+        highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
 
         // Verify that the destination index does not exist. Otherwise, analytics' reindexing step would fail.
         assertFalse(highLevelClient().indices().exists(new GetIndexRequest(destIndex), RequestOptions.DEFAULT));
@@ -1391,12 +1392,6 @@ public void testStartDataFrameAnalyticsConfig() throws Exception {
             new StartDataFrameAnalyticsRequest(configId),
             machineLearningClient::startDataFrameAnalytics, machineLearningClient::startDataFrameAnalyticsAsync);
         assertTrue(startDataFrameAnalyticsResponse.isAcknowledged());
-        assertThat(
-            getAnalyticsState(configId),
-            anyOf(
-                equalTo(DataFrameAnalyticsState.STARTED),
-                equalTo(DataFrameAnalyticsState.REINDEXING),
-                equalTo(DataFrameAnalyticsState.ANALYZING)));
 
         // Wait for the analytics to stop.
         assertBusy(() -> assertThat(getAnalyticsState(configId), equalTo(DataFrameAnalyticsState.STOPPED)), 30, TimeUnit.SECONDS);
@@ -1409,7 +1404,8 @@ public void testStopDataFrameAnalyticsConfig() throws Exception {
         String sourceIndex = "stop-test-source-index";
         String destIndex = "stop-test-dest-index";
         createIndex(sourceIndex, mappingForClassification());
-        highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000), RequestOptions.DEFAULT);
+        highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
 
         // Verify that the destination index does not exist. Otherwise, analytics' reindexing step would fail.
         assertFalse(highLevelClient().indices().exists(new GetIndexRequest(destIndex), RequestOptions.DEFAULT));

From 83139b2e03d470f1fcfda66ec6fef11f3d021517 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 7 Jun 2019 15:57:49 +0300
Subject: [PATCH 3/3] Remove unused import

---
 .../test/java/org/elasticsearch/client/MachineLearningIT.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index 60b4314c7b3d3..d6550964f9732 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -164,7 +164,6 @@
 import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.CoreMatchers.hasItems;
 import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsInAnyOrder;
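
--
Note: the fix in PATCH 1/3 boils down to a retry-once wrapper around a supplier of
search responses, made safe by allow_partial_search_results=false (a failed shard
now makes the request throw instead of returning a partial response, and the server
clears the scroll context of the failed request itself). The following is a minimal
standalone sketch of that pattern, not part of the patches; RetryOnceExample and its
members are illustrative names, not Elasticsearch API:

import java.util.function.Supplier;

public class RetryOnceExample {

    private boolean previousAttemptFailed = false;

    <T> T getWithSingleRetry(Supplier<T> request, Runnable resetStateBeforeRetry) {
        try {
            T response = request.get();
            // A success re-arms the single retry for any future failure
            previousAttemptFailed = false;
            return response;
        } catch (RuntimeException e) {
            if (previousAttemptFailed) {
                // Second consecutive failure: give up and propagate
                throw e;
            }
            previousAttemptFailed = true;
            // e.g. drop the scroll id so the retry starts a fresh search
            resetStateBeforeRetry.run();
            return getWithSingleRetry(request, resetStateBeforeRetry);
        }
    }
}

This mirrors how tryRequestWithSearchResponse() retries initScroll() exactly once
after markScrollAsErrored() nulls the scroll id and sets searchHasShardFailure.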