From 36313a3982b7c8bf5729363a496ac6489d49b4cb Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Wed, 21 Aug 2019 15:35:27 -0400 Subject: [PATCH 1/3] Use collector manager for search when necessary When we optimize sort, we sort segments by their min/max value. As a collector expects to have segments in order, we cannot use a single collector for sorted segments. Thus for such a case, we use collectorManager, where for every segment a dedicated collector will be created. TODO: on the Lucene side we need to make collector to accept min competitive score --- .../search/internal/ContextIndexSearcher.java | 16 +- .../search/query/QueryPhase.java | 179 ++++++++++++------ .../search/query/QueryPhaseTests.java | 5 +- 3 files changed, 139 insertions(+), 61 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 6ca399a5f276a..384d28a6bb169 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConjunctionDISI; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; @@ -54,6 +55,7 @@ import org.elasticsearch.search.profile.query.QueryTimingType; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Set; @@ -138,8 +140,19 @@ private void checkCancelled() { } } + public void search(List leaves, Weight weight, CollectorManager manager) throws IOException { + final List collectors = new ArrayList<>(leaves.size()); + for (LeafReaderContext ctx : leaves) 
{ + final Collector collector = manager.newCollector(); + //TODO: setMinCompetitiveScore between Collectors + searchLeaf(ctx, weight, collector); + collectors.add(collector); + } + manager.reduce(collectors); + } + @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { + public void search(List leaves, Weight weight, Collector collector) throws IOException { for (LeafReaderContext ctx : leaves) { // search each subreader searchLeaf(ctx, weight, collector); } } @@ -228,6 +241,7 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { } } + private static BitSet getSparseBitSetOrNull(Bits liveDocs) { if (liveDocs instanceof SparseFixedBitSet) { return (BitSet) liveDocs; diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 11f28c48f6f6c..32eab48367fe8 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -31,15 +31,18 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; @@ -71,6 +74,7 @@ import java.io.IOException; import java.util.ArrayList; 
import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.LinkedList; @@ -230,15 +234,10 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe // modify sorts: add sort on _score as 1st sort, and move the sort on the original field as the 2nd sort SortField[] oldSortFields = searchContext.sort().sort.getSort(); DocValueFormat[] oldFormats = searchContext.sort().formats; - SortField[] newSortFields = new SortField[oldSortFields.length + 2]; - DocValueFormat[] newFormats = new DocValueFormat[oldSortFields.length + 2]; + SortField[] newSortFields = new SortField[oldSortFields.length + 1]; + DocValueFormat[] newFormats = new DocValueFormat[oldSortFields.length + 1]; newSortFields[0] = SortField.FIELD_SCORE; newFormats[0] = DocValueFormat.RAW; - // Add a tiebreak on _doc in order to be able to search - // the leaves in any order. This is needed since we reorder - // the leaves based on the minimum value in each segment. 
- newSortFields[newSortFields.length-1] = SortField.FIELD_DOC; - newFormats[newSortFields.length-1] = DocValueFormat.RAW; System.arraycopy(oldSortFields, 0, newSortFields, 1, oldSortFields.length); System.arraycopy(oldFormats, 0, newFormats, 1, oldFormats.length); sortAndFormatsForRewrittenNumericSort = searchContext.sort(); // stash SortAndFormats to restore it later @@ -286,61 +285,20 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe } else { checkCancelled = null; } - searcher.setCheckCancelled(checkCancelled); - final boolean doProfile = searchContext.getProfilers() != null; - // create the top docs collector last when the other collectors are known - final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector); - // add the top docs collector, the first collector context in the chain - collectors.addFirst(topDocsFactory); - - final Collector queryCollector; - if (doProfile) { - InternalProfileCollector profileCollector = QueryCollectorContext.createQueryCollectorWithProfiler(collectors); - searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollector); - queryCollector = profileCollector; + boolean shouldRescore; + // if we are optimizing sort and there are no other collectors + if (sortAndFormatsForRewrittenNumericSort != null && collectors.size() == 0 && searchContext.getProfilers() == null) { + shouldRescore = searchWithCollectorManager(searchContext, searcher, query, leafSorter, timeoutSet); } else { - queryCollector = QueryCollectorContext.createQueryCollector(collectors); - } - - try { - Weight weight = searcher.createWeight(searcher.rewrite(query), queryCollector.scoreMode(), 1f); - // We search the leaves in a different order when the numeric sort optimization is - // activated. Collectors expect leaves in order when searching but this is fine in this - // case since we only have a TopFieldCollector and we force the tiebreak on _doc. 
- List leaves = new ArrayList<>(searcher.getIndexReader().leaves()); - leafSorter.accept(leaves); - for (LeafReaderContext ctx : leaves) { - searcher.searchLeaf(ctx, weight, queryCollector); - } - } catch (EarlyTerminatingCollector.EarlyTerminationException e) { - queryResult.terminatedEarly(true); - } catch (TimeExceededException e) { - assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; - - if (searchContext.request().allowPartialSearchResults() == false) { - // Can't rethrow TimeExceededException because not serializable - throw new QueryPhaseExecutionException(searchContext, "Time exceeded"); - } - queryResult.searchTimedOut(true); - } finally { - searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); - } - if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER - && queryResult.terminatedEarly() == null) { - queryResult.terminatedEarly(false); - } - - final QuerySearchResult result = searchContext.queryResult(); - for (QueryCollectorContext ctx : collectors) { - ctx.postProcess(result); + shouldRescore = searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet); } // if we rewrote numeric long or date sort, restore fieldDocs based on the original sort if (sortAndFormatsForRewrittenNumericSort != null) { searchContext.sort(sortAndFormatsForRewrittenNumericSort); // restore SortAndFormats - restoreTopFieldDocs(result, sortAndFormatsForRewrittenNumericSort); + restoreTopFieldDocs(queryResult, sortAndFormatsForRewrittenNumericSort); } ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH); @@ -351,14 +309,119 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe } if (searchContext.getProfilers() != null) { ProfileShardResult shardResults = SearchProfileShardResults.buildShardResults(searchContext.getProfilers()); - result.profileResults(shardResults); + 
queryResult.profileResults(shardResults); } - return topDocsFactory.shouldRescore(); + return shouldRescore; } catch (Exception e) { throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); } } + private static boolean searchWithCollector(SearchContext searchContext, ContextIndexSearcher searcher, Query query, + LinkedList collectors, boolean hasFilterCollector, boolean timeoutSet) throws IOException { + // create the top docs collector last when the other collectors are known + final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector); + // add the top docs collector, the first collector context in the chain + collectors.addFirst(topDocsFactory); + + final Collector queryCollector; + if ( searchContext.getProfilers() != null) { + InternalProfileCollector profileCollector = QueryCollectorContext.createQueryCollectorWithProfiler(collectors); + searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollector); + queryCollector = profileCollector; + } else { + queryCollector = QueryCollectorContext.createQueryCollector(collectors); + } + QuerySearchResult queryResult = searchContext.queryResult(); + try { + Weight weight = searcher.createWeight(searcher.rewrite(query), queryCollector.scoreMode(), 1f); + searcher.search(searcher.getIndexReader().leaves(), weight, queryCollector); + } catch (EarlyTerminatingCollector.EarlyTerminationException e) { + queryResult.terminatedEarly(true); + } catch (TimeExceededException e) { + assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; + if (searchContext.request().allowPartialSearchResults() == false) { + // Can't rethrow TimeExceededException because not serializable + throw new QueryPhaseExecutionException(searchContext, "Time exceeded"); + } + queryResult.searchTimedOut(true); + } finally { + searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); + } + if 
(searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER && queryResult.terminatedEarly() == null) { + queryResult.terminatedEarly(false); + } + for (QueryCollectorContext ctx : collectors) { + ctx.postProcess(queryResult); + } + return topDocsFactory.shouldRescore(); + } + + // we use collectorManager during sort optimization + // for the sort optimization, we have already checked that there are no other collectors, no filters, + // no search after, no scroll, no collapse, no track scores + // this means we can use TopFieldCollector directly + private static boolean searchWithCollectorManager(SearchContext searchContext, ContextIndexSearcher searcher, Query query, + CheckedConsumer, IOException> leafSorter, boolean timeoutSet) throws IOException { + final IndexReader reader = searchContext.searcher().getIndexReader(); + final int numHits = Math.min(searchContext.from() + searchContext.size(), Math.max(1, reader.numDocs())); + final SortAndFormats sortAndFormats = searchContext.sort(); + + ScoreMode scoreMode = ScoreMode.TOP_SCORES; + int hitCount = 0; + if (searchContext.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + hitCount = shortcutTotalHitCount(reader, query); + if (searchContext.trackTotalHitsUpTo() == Integer.MAX_VALUE) { + scoreMode = ScoreMode.COMPLETE; //TODO: not sure if scoreMode should always be TOP_SCORES + } + } + final int totalHitsThreshold = hitCount == -1 ? searchContext.trackTotalHitsUpTo() : 1; + final TotalHits totalHits = hitCount == -1 ? 
null : new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); + + CollectorManager manager = new CollectorManager<>() { + @Override + public TopFieldCollector newCollector() throws IOException { + return TopFieldCollector.create(sortAndFormats.sort, numHits, null, totalHitsThreshold); + } + @Override + public Void reduce(Collection collectors) throws IOException { + TopFieldDocs[] topDocsArr = new TopFieldDocs[collectors.size()]; + int i = 0; + for (TopFieldCollector collector : collectors) { + topDocsArr[i++] = collector.topDocs(); + } + // we have to set setShardIndex to true, as Lucene can't have ScoreDocs without shardIndex set + TopFieldDocs mergedTopDocs = TopDocs.merge(sortAndFormats.sort, 0, numHits, topDocsArr, true); + // reset shard index for all topDocs; ES will set shard index later during reduce stage + for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) { + scoreDoc.shardIndex = -1; + } + if (totalHits != null) { // we have already precalculated totalHits for the whole index + mergedTopDocs = new TopFieldDocs(totalHits, mergedTopDocs.scoreDocs, mergedTopDocs.fields); + } + searchContext.queryResult().topDocs(new TopDocsAndMaxScore(mergedTopDocs, Float.NaN), sortAndFormats.formats); + return null; + } + }; + + List leaves = new ArrayList<>(searcher.getIndexReader().leaves()); + leafSorter.accept(leaves); + try { + Weight weight = searcher.createWeight(searcher.rewrite(query), scoreMode, 1f); + searcher.search(leaves, weight, manager); + } catch (TimeExceededException e) { + assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; + if (searchContext.request().allowPartialSearchResults() == false) { + // Can't rethrow TimeExceededException because not serializable + throw new QueryPhaseExecutionException(searchContext, "Time exceeded"); + } + searchContext.queryResult().searchTimedOut(true); + } finally { + searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); + } + return false; // no rescoring when sorting by field 
+ } + private static Query tryRewriteLongSort(SearchContext searchContext, IndexReader reader, Query query, boolean hasFilterCollector) throws IOException { if (searchContext.searchAfter() != null) return null; @@ -399,7 +462,7 @@ private static Query tryRewriteLongSort(SearchContext searchContext, IndexReader if (missingValuesAccordingToSort == false) return null; int docCount = PointValues.getDocCount(reader, fieldName); - // is not worth to run optimization on small index + // is not worth to run optimization on small index if (docCount <= 512) return null; // check for multiple values @@ -470,7 +533,7 @@ static void restoreTopFieldDocs(QuerySearchResult result, SortAndFormats origina TopDocs topDocs = result.topDocs().topDocs; for (ScoreDoc scoreDoc : topDocs.scoreDocs) { FieldDoc fieldDoc = (FieldDoc) scoreDoc; - fieldDoc.fields = Arrays.copyOfRange(fieldDoc.fields, 1, fieldDoc.fields.length-1); + fieldDoc.fields = Arrays.copyOfRange(fieldDoc.fields, 1, fieldDoc.fields.length); } TopFieldDocs newTopDocs = new TopFieldDocs(topDocs.totalHits, topDocs.scoreDocs, originalSortAndFormats.sort.getSort()); result.topDocs(new TopDocsAndMaxScore(newTopDocs, Float.NaN), originalSortAndFormats.formats); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index cfb1083a9cbae..6e2490a49aba4 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -886,8 +886,9 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()) { @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { - throw new AssertionError(); + public void search(List leaves, Weight weight, Collector collector) throws IOException { + final 
Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); + super.search(leaves, weight, in); } @Override From 8e054be52f60099bb3383679849d4c8d5bbcf750 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Wed, 28 Aug 2019 14:26:08 -0400 Subject: [PATCH 2/3] Address Jim's comments --- .../search/internal/ContextIndexSearcher.java | 4 +- .../search/query/QueryPhase.java | 37 ++++++++++++------- .../search/SearchCancellationTests.java | 8 +--- .../search/query/QueryPhaseTests.java | 16 ++------ 4 files changed, 30 insertions(+), 35 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 384d28a6bb169..5265372e8fe4a 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -152,7 +152,7 @@ public void search(List leaves, Weight weight, CollectorManag } @Override - public void search(List leaves, Weight weight, Collector collector) throws IOException { + protected void search(List leaves, Weight weight, Collector collector) throws IOException { for (LeafReaderContext ctx : leaves) { // search each subreader searchLeaf(ctx, weight, collector); } @@ -164,7 +164,7 @@ public void search(List leaves, Weight weight, Collector coll * {@link LeafCollector#collect(int)} is called for every matching document in * the provided ctx. 
 */ - public void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException { + private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException { checkCancelled(); weight = wrapWeight(weight); final LeafCollector leafCollector; diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 32eab48367fe8..fd31e1a137d1f 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -234,10 +234,15 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe // modify sorts: add sort on _score as 1st sort, and move the sort on the original field as the 2nd sort SortField[] oldSortFields = searchContext.sort().sort.getSort(); DocValueFormat[] oldFormats = searchContext.sort().formats; - SortField[] newSortFields = new SortField[oldSortFields.length + 1]; - DocValueFormat[] newFormats = new DocValueFormat[oldSortFields.length + 1]; + SortField[] newSortFields = new SortField[oldSortFields.length + 2]; + DocValueFormat[] newFormats = new DocValueFormat[oldSortFields.length + 2]; newSortFields[0] = SortField.FIELD_SCORE; newFormats[0] = DocValueFormat.RAW; + // Add a tiebreak on _doc in order to be able to search + // the leaves in any order. This is needed since we reorder + // the leaves based on the minimum/maximum value in each segment. 
+ newSortFields[newSortFields.length-1] = SortField.FIELD_DOC; + newFormats[newSortFields.length-1] = DocValueFormat.RAW; System.arraycopy(oldSortFields, 0, newSortFields, 1, oldSortFields.length); System.arraycopy(oldFormats, 0, newFormats, 1, oldFormats.length); sortAndFormatsForRewrittenNumericSort = searchContext.sort(); // stash SortAndFormats to restore it later @@ -334,8 +339,7 @@ private static boolean searchWithCollector(SearchContext searchContext, ContextI } QuerySearchResult queryResult = searchContext.queryResult(); try { - Weight weight = searcher.createWeight(searcher.rewrite(query), queryCollector.scoreMode(), 1f); - searcher.search(searcher.getIndexReader().leaves(), weight, queryCollector); + searcher.search(query, queryCollector); } catch (EarlyTerminatingCollector.EarlyTerminationException e) { queryResult.terminatedEarly(true); } catch (TimeExceededException e) { @@ -367,16 +371,21 @@ private static boolean searchWithCollectorManager(SearchContext searchContext, C final int numHits = Math.min(searchContext.from() + searchContext.size(), Math.max(1, reader.numDocs())); final SortAndFormats sortAndFormats = searchContext.sort(); - ScoreMode scoreMode = ScoreMode.TOP_SCORES; - int hitCount = 0; - if (searchContext.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_DISABLED) { - hitCount = shortcutTotalHitCount(reader, query); - if (searchContext.trackTotalHitsUpTo() == Integer.MAX_VALUE) { - scoreMode = ScoreMode.COMPLETE; //TODO: not sure if scoreMode should always be TOP_SCORES + int totalHitsThreshold; + TotalHits totalHits; + if (searchContext.trackTotalHitsUpTo() == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + totalHitsThreshold = 1; + totalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + } else { + int hitCount = shortcutTotalHitCount(reader, query); + if (hitCount == -1) { + totalHitsThreshold = searchContext.trackTotalHitsUpTo(); + totalHits = null; // will be computed via the collector + } else { + 
totalHitsThreshold = 1; + totalHits = new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); // don't compute hit counts via the collector } } - final int totalHitsThreshold = hitCount == -1 ? searchContext.trackTotalHitsUpTo() : 1; - final TotalHits totalHits = hitCount == -1 ? null : new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); CollectorManager manager = new CollectorManager<>() { @Override @@ -407,7 +416,7 @@ public Void reduce(Collection collectors) throws IOException List leaves = new ArrayList<>(searcher.getIndexReader().leaves()); leafSorter.accept(leaves); try { - Weight weight = searcher.createWeight(searcher.rewrite(query), scoreMode, 1f); + Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1f); searcher.search(leaves, weight, manager); } catch (TimeExceededException e) { assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; @@ -533,7 +542,7 @@ static void restoreTopFieldDocs(QuerySearchResult result, SortAndFormats origina TopDocs topDocs = result.topDocs().topDocs; for (ScoreDoc scoreDoc : topDocs.scoreDocs) { FieldDoc fieldDoc = (FieldDoc) scoreDoc; - fieldDoc.fields = Arrays.copyOfRange(fieldDoc.fields, 1, fieldDoc.fields.length); + fieldDoc.fields = Arrays.copyOfRange(fieldDoc.fields, 1, fieldDoc.fields.length-1); } TopFieldDocs newTopDocs = new TopFieldDocs(topDocs.totalHits, topDocs.scoreDocs, originalSortAndFormats.sort.getSort()); result.topDocs(new TopDocsAndMaxScore(newTopDocs, Float.NaN), originalSortAndFormats.formats); diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java index 4dbacc8ec87de..1fc7a0dbac275 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java @@ -88,13 +88,9 @@ public void testCancellableCollector() throws IOException { throw new 
TaskCancelledException("cancelled"); } }); - LeafReaderContext leafContext = reader.leaves().get(0); - final Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE, 1f); - searcher.searchLeaf(searcher.getIndexReader().leaves().get(0), weight, collector); - assertThat(collector.getTotalHits(), equalTo(leafContext.reader().numDocs())); + searcher.search(new MatchAllDocsQuery(), collector); + assertThat(collector.getTotalHits(), equalTo(reader.numDocs())); cancelled.set(true); - expectThrows(TaskCancelledException.class, - () -> searcher.searchLeaf(searcher.getIndexReader().leaves().get(0), weight, collector)); expectThrows(TaskCancelledException.class, () -> searcher.search(new MatchAllDocsQuery(), collector)); } diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 6e2490a49aba4..b1da27ecbd60d 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldComparator; @@ -890,12 +891,6 @@ public void search(List leaves, Weight weight, Collector coll final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); super.search(leaves, weight, in); } - - @Override - public void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException { - collector = new AssertingEarlyTerminationFilterCollector(collector, size); - super.searchLeaf(ctx, weight, collector); - } }; } @@ -905,12 +900,7 @@ private static ContextIndexSearcher 
newOptimizedContextSearcher(IndexReader read IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()) { @Override - public void search(Query query, Collector results) throws IOException { - throw new AssertionError(); - } - - @Override - public void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException { + public void search(List leaves, Weight weight, CollectorManager manager) throws IOException { final Query query = weight.getQuery(); assertTrue(query instanceof BooleanQuery); List clauses = ((BooleanQuery) query).clauses(); @@ -923,7 +913,7 @@ public void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector ); } if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); - super.searchLeaf(ctx, weight, collector); + super.search(leaves, weight, manager); } }; } From 816b243395323bddce533535978852425b2e3204 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 29 Aug 2019 06:21:28 -0400 Subject: [PATCH 3/3] Correct check style --- .../java/org/elasticsearch/search/SearchCancellationTests.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java index 1fc7a0dbac275..cdbe140b0f83c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java @@ -22,14 +22,11 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.ScoreMode; import 
org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil;