@@ -64,6 +64,7 @@
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.LongSupplier;

import static org.elasticsearch.common.time.DateUtils.toLong;
@@ -490,6 +491,14 @@ public Relation isFieldWithinQuery(IndexReader reader,
}
}

@Override
public Function<byte[], Number> pointReaderIfPossible() {
if (isSearchable()) {
return resolution()::parsePointAsMillis;
}
return null;
}

@Override
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
failIfNoDocValues();
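The new `DateFieldType#pointReaderIfPossible` override above hands back `resolution()::parsePointAsMillis`, which decodes the raw 8-byte packed value stored in the points index into epoch milliseconds. A minimal sketch of that conversion, assuming a millisecond-resolution field and using Lucene's `LongPoint` encoding directly (the demo class and sample value are ours, not part of the change):

```java
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.util.BytesRef;

// Sketch: what the converter returned by pointReaderIfPossible() conceptually does for a
// millisecond-resolution date field. In real use the byte[] comes from the BKD tree,
// e.g. PointValues#getMinPackedValue or #getMaxPackedValue.
public class DatePointDecodeDemo {
    public static void main(String[] args) {
        long epochMillis = 1_584_700_000_000L;          // example timestamp
        BytesRef packed = LongPoint.pack(epochMillis);  // encode as the points index does
        long decoded = LongPoint.decodeDimension(packed.bytes, packed.offset);
        System.out.println(decoded);                    // prints 1584700000000
    }
}
```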
@@ -53,6 +53,7 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;

/**
* This defines the core properties and functions to operate on a field.
@@ -129,7 +130,7 @@ public int hashCode() {

/** Returns the name of this type, as would be specified in mapping properties */
public abstract String typeName();

/** Returns the field family type, as used in field capabilities */
public String familyTypeName() {
return typeName();
@@ -173,6 +174,17 @@ public boolean isSearchable() {
return isIndexed;
}

/**
* If the field supports using the indexed data to speed up operations related to ordering of data, such as sorting or
* aggregations, return a function that decodes a raw indexed (point) value into a {@link Number}. If the optimization is
* unsupported for this field type, there is no need to override this method.
*
* @return null if the optimization cannot be applied, otherwise a function to use for the optimization
*/
@Nullable
public Function<byte[], Number> pointReaderIfPossible() {
return null;
}

/** Returns true if the field is aggregatable. */
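The `pointReaderIfPossible` default added above makes this an opt-in extension point: any `MappedFieldType` subclass, including ones contributed by plugins, can participate by overriding it. A self-contained sketch of the contract, using a hypothetical int-valued field's encoding (the class and its static method are ours, invented for illustration):

```java
import java.util.function.Function;
import org.apache.lucene.document.IntPoint;

// Sketch of the contract a custom field type would fulfil when overriding
// pointReaderIfPossible(): return a decoder matching the index encoding when the
// indexed points can stand in for doc values, otherwise return null.
public class PointReaderContractDemo {
    static Function<byte[], Number> pointReaderIfPossible(boolean isSearchable) {
        if (isSearchable) {
            return bytes -> IntPoint.decodeDimension(bytes, 0); // must match how values were indexed
        }
        return null; // field not indexed: no fast path available
    }

    public static void main(String[] args) {
        byte[] packed = IntPoint.pack(42).bytes; // encode one int the way IntPoint indexes it
        Function<byte[], Number> reader = pointReaderIfPossible(true);
        System.out.println(reader == null ? "no fast path" : reader.apply(packed)); // 42
    }
}
```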
@@ -66,6 +66,7 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;

/** A {@link FieldMapper} for numeric types: byte, short, int, long, float and double. */
public class NumberFieldMapper extends FieldMapper {
@@ -971,6 +972,14 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower
return query;
}

@Override
public Function<byte[], Number> pointReaderIfPossible() {
if (isSearchable()) {
return this::parsePoint;
}
return null;
}

@Override
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
failIfNoDocValues();
@@ -19,11 +19,13 @@
package org.elasticsearch.search.aggregations;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreMode;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
@@ -34,6 +36,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/**
* Base implementation for concrete aggregators.
@@ -106,6 +109,26 @@ public ScoreMode scoreMode() {
addRequestCircuitBreakerBytes(DEFAULT_WEIGHT);
}

/**
* Returns a converter for point values if it's safe to use the indexed data instead of
* doc values. Generally, this means that the query has no filters or scripts, the aggregation is
* top level, the underlying field is indexed, and the index is sorted in the right order.
*
* If those conditions aren't met, this method returns <code>null</code> to indicate that a
* point reader cannot be used in this case.
*
* @param config The config for the values source metric.
*/
public final Function<byte[], Number> pointReaderIfAvailable(ValuesSourceConfig config) {
if (context.query() != null && context.query().getClass() != MatchAllDocsQuery.class) {
return null;
}
if (parent != null) {
return null;
}
return config.getPointReaderOrNull();
}

/**
* Increment or decrement the number of bytes that have been allocated to service
* this request and potentially trigger a {@link CircuitBreakingException}. The
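Taken together with the mapper changes above, the optimization now threads through three layers before an aggregator gets a converter. A condensed sketch of that decision chain as an aggregator constructor would see it (a fragment only; `pointConverter` and `pointField` mirror the fields used by the min/max aggregators below):

```java
// 1. AggregatorBase#pointReaderIfAvailable: query absent or match_all, no parent aggregator.
// 2. ValuesSourceConfig#getPointReaderOrNull: a concrete field, no script, no missing value.
// 3. MappedFieldType#pointReaderIfPossible: the field type opts in and is indexed.
Function<byte[], Number> pointConverter = pointReaderIfAvailable(config);
String pointField = pointConverter != null
    ? config.fieldContext().field()  // safe to read straight from the BKD tree
    : null;                          // fall back to collecting doc values per document
```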
@@ -44,8 +44,6 @@
import java.util.Map;
import java.util.function.Function;

import static org.elasticsearch.search.aggregations.metrics.MinAggregator.getPointReaderOrNull;

class MaxAggregator extends NumericMetricsAggregator.SingleValue {

final ValuesSource.Numeric valuesSource;
@@ -68,7 +66,7 @@ class MaxAggregator extends NumericMetricsAggregator.SingleValue {
maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
}
this.formatter = config.format();
this.pointConverter = getPointReaderOrNull(context, parent, config);
this.pointConverter = pointReaderIfAvailable(config);
if (pointConverter != null) {
pointField = config.fieldContext().field();
} else {
@@ -96,7 +94,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
Number segMax = findLeafMaxValue(ctx.reader(), pointField, pointConverter);
if (segMax != null) {
/*
* There is no parent aggregator (see {@link MinAggregator#getPointReaderOrNull}
* There is no parent aggregator (see {@link AggregatorBase#pointReaderIfAvailable})
* so the ordinal for the bucket is always 0.
*/
assert maxes.size() == 1;
@@ -22,17 +22,13 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.DoubleArray;
import org.elasticsearch.index.fielddata.NumericDoubleValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.aggregations.Aggregator;
@@ -71,7 +67,7 @@ class MinAggregator extends NumericMetricsAggregator.SingleValue {
mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
}
this.format = config.format();
this.pointConverter = getPointReaderOrNull(context, parent, config);
this.pointConverter = pointReaderIfAvailable(config);
if (pointConverter != null) {
pointField = config.fieldContext().field();
} else {
@@ -159,40 +155,6 @@ public void doClose() {
}


/**
* Returns a converter for point values if early termination is applicable to
* the context or <code>null</code> otherwise.
*
* @param context The {@link SearchContext} of the aggregation.
* @param parent The parent aggregator.
* @param config The config for the values source metric.
*/
static Function<byte[], Number> getPointReaderOrNull(SearchContext context, Aggregator parent,
ValuesSourceConfig config) {
if (context.query() != null &&
context.query().getClass() != MatchAllDocsQuery.class) {
return null;
}
if (parent != null) {
return null;
}
if (config.fieldContext() != null && config.script() == null && config.missing() == null) {
MappedFieldType fieldType = config.fieldContext().fieldType();
if (fieldType == null || fieldType.isSearchable() == false) {
return null;
}
Function<byte[], Number> converter = null;
if (fieldType instanceof NumberFieldMapper.NumberFieldType) {
converter = ((NumberFieldMapper.NumberFieldType) fieldType)::parsePoint;
} else if (fieldType.getClass() == DateFieldMapper.DateFieldType.class) {
DateFieldMapper.DateFieldType dft = (DateFieldMapper.DateFieldType) fieldType;
converter = dft.resolution()::parsePointAsMillis;
}
return converter;
}
return null;
}

/**
* Returns the minimum value indexed in the <code>fieldName</code> field or <code>null</code>
* if the value cannot be inferred from the indexed {@link PointValues}.
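The javadoc above belongs to the leaf-level fast path that consumes the converter: when a segment's points index fully covers the field, the minimum can be read from BKD metadata without visiting a single document. A simplified sketch of that idea (the helper class is ours; the real method must additionally bail out when the segment contains deleted docs or docs without a value for the field):

```java
import java.io.IOException;
import java.util.function.Function;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PointValues;

// Simplified sketch: the BKD tree stores its global minimum packed value, so the segment
// minimum is available without scanning documents. Production code must also verify that
// every live doc has a point for this field before trusting this shortcut.
final class LeafMinSketch {
    static Number leafMin(LeafReader reader, String field,
                          Function<byte[], Number> converter) throws IOException {
        PointValues points = reader.getPointValues(field);
        if (points == null) {
            return null; // segment has no points for this field
        }
        return converter.apply(points.getMinPackedValue());
    }
}
```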
@@ -30,6 +30,7 @@
import org.elasticsearch.search.DocValueFormat;

import java.time.ZoneId;
import java.util.function.Function;
import java.util.function.LongSupplier;

/**
@@ -372,6 +373,23 @@ public boolean hasGlobalOrdinals() {
return valuesSource.hasGlobalOrdinals();
}

/**
* This method is used when an aggregation can optimize by using the indexed data instead of the doc values. We check to see if the
* indexed data will match the values source output (meaning there isn't a script or a missing value, since both could modify the
* value at read time). If the settings allow for it, we then ask the {@link MappedFieldType} to build the actual point reader
* based on the field type. This allows for a point of extensibility in plugins.
*
* @return null if we cannot apply the optimization, otherwise the point reader function.
*/
@Nullable
public Function<byte[], Number> getPointReaderOrNull() {
MappedFieldType fieldType = fieldType();
if (fieldType != null && script() == null && missing() == null) {
return fieldType.pointReaderIfPossible();
}
return null;
}

/**
* Returns a human readable description of this values source, for use in error messages and similar.
*/