@@ -0,0 +1,64 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.integration;

import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xpack.core.ml.action.ExplainDataFrameAnalyticsAction;
import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsSource;
import org.elasticsearch.xpack.core.ml.dataframe.analyses.Classification;
import org.elasticsearch.xpack.core.ml.utils.QueryProvider;

import java.io.IOException;

import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class ExplainDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTestCase {

    public void testSourceQueryIsApplied() throws IOException {
        // To test that the source query is applied when we extract data,
        // we set up a job with a query that excludes all but one document.
        // We then assert that the memory estimation is low enough.

        String sourceIndex = "test-source-query-is-applied";

        client().admin().indices().prepareCreate(sourceIndex)
            .addMapping("_doc", "numeric_1", "type=double", "numeric_2", "type=float", "categorical", "type=keyword")
            .get();

        BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
        bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);

        for (int i = 0; i < 30; i++) {
            IndexRequest indexRequest = new IndexRequest(sourceIndex);

            // Give the first document a distinct category so the source query matches only that one document
            indexRequest.source("numeric_1", 1.0, "numeric_2", 2.0, "categorical", i == 0 ? "only-one" : "normal");
            bulkRequestBuilder.add(indexRequest);
        }
        BulkResponse bulkResponse = bulkRequestBuilder.get();
        if (bulkResponse.hasFailures()) {
            fail("Failed to index data: " + bulkResponse.buildFailureMessage());
        }

        String id = "test_source_query_is_applied";

        DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder()
            .setId(id)
            .setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex },
                QueryProvider.fromParsedQuery(QueryBuilders.termQuery("categorical", "only-one"))))
            .setAnalysis(new Classification("categorical"))
            .buildForExplain();

        ExplainDataFrameAnalyticsAction.Response explainResponse = explainDataFrame(config);

        assertThat(explainResponse.getMemoryEstimation().getExpectedMemoryWithoutDisk().getKb(), lessThanOrEqualTo(500L));
Contributor:

I'm wondering if

    assertThat(explainResponse.getMemoryEstimation().getExpectedMemoryWithoutDisk(), lessThanOrEqualTo(new ByteSizeValue(500, KB)));

would work as well.

Contributor Author:

It does, but it makes the line longer and I don't think it significantly improves readability.
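
A minimal sketch of the reviewer's alternative in full, assuming ByteSizeValue and ByteSizeUnit are imported from org.elasticsearch.common.unit (an inference from the snippet above, not part of this change):

    // ByteSizeValue is Comparable, so the matcher can compare the whole value
    // instead of first extracting the kilobyte count with getKb().
    assertThat(explainResponse.getMemoryEstimation().getExpectedMemoryWithoutDisk(),
        lessThanOrEqualTo(new ByteSizeValue(500, ByteSizeUnit.KB)));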

    }
}

@@ -20,6 +20,7 @@
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.xpack.core.ml.action.DeleteDataFrameAnalyticsAction;
import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction;
import org.elasticsearch.xpack.core.ml.action.ExplainDataFrameAnalyticsAction;
import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction;
import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction;
import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction;
@@ -146,6 +147,11 @@ protected GetDataFrameAnalyticsStatsAction.Response.Stats getAnalyticsStats(Stri
        return stats.get(0);
    }

    protected ExplainDataFrameAnalyticsAction.Response explainDataFrame(DataFrameAnalyticsConfig config) {
        PutDataFrameAnalyticsAction.Request request = new PutDataFrameAnalyticsAction.Request(config);
        return client().execute(ExplainDataFrameAnalyticsAction.INSTANCE, request).actionGet();
    }

    protected EvaluateDataFrameAction.Response evaluateDataFrame(String index, Evaluation evaluation) {
        EvaluateDataFrameAction.Request request =
            new EvaluateDataFrameAction.Request()
@@ -156,12 +162,12 @@ protected EvaluateDataFrameAction.Response evaluateDataFrame(String index, Evalu

    protected static DataFrameAnalyticsConfig buildAnalytics(String id, String sourceIndex, String destIndex,
                                                              @Nullable String resultsField, DataFrameAnalysis analysis) {
        DataFrameAnalyticsConfig.Builder configBuilder = new DataFrameAnalyticsConfig.Builder();
        configBuilder.setId(id);
        configBuilder.setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex }, null));
        configBuilder.setDest(new DataFrameAnalyticsDest(destIndex, resultsField));
        configBuilder.setAnalysis(analysis);
        return configBuilder.build();
        return new DataFrameAnalyticsConfig.Builder()
            .setId(id)
            .setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex }, null))
            .setDest(new DataFrameAnalyticsDest(destIndex, resultsField))
            .setAnalysis(analysis)
            .build();
    }

    protected void assertIsStopped(String id) {

@@ -25,15 +25,18 @@ public class DataFrameDataExtractorFactory {
    private final Client client;
    private final String analyticsId;
    private final List<String> indices;
    private final QueryBuilder sourceQuery;
    private final ExtractedFields extractedFields;
    private final Map<String, String> headers;
    private final boolean includeRowsWithMissingValues;

    public DataFrameDataExtractorFactory(Client client, String analyticsId, List<String> indices, ExtractedFields extractedFields,
                                         Map<String, String> headers, boolean includeRowsWithMissingValues) {
    private DataFrameDataExtractorFactory(Client client, String analyticsId, List<String> indices, QueryBuilder sourceQuery,
                                          ExtractedFields extractedFields, Map<String, String> headers,
                                          boolean includeRowsWithMissingValues) {
        this.client = Objects.requireNonNull(client);
        this.analyticsId = Objects.requireNonNull(analyticsId);
        this.indices = Objects.requireNonNull(indices);
        this.sourceQuery = Objects.requireNonNull(sourceQuery);
        this.extractedFields = Objects.requireNonNull(extractedFields);
        this.headers = headers;
        this.includeRowsWithMissingValues = includeRowsWithMissingValues;
@@ -54,7 +57,12 @@ public DataFrameDataExtractor newExtractor(boolean includeSource) {
    }

    private QueryBuilder createQuery() {
        return includeRowsWithMissingValues ? QueryBuilders.matchAllQuery() : allExtractedFieldsExistQuery();
        BoolQueryBuilder query = QueryBuilders.boolQuery();
        query.filter(sourceQuery);
        if (includeRowsWithMissingValues == false) {
            query.filter(allExtractedFieldsExistQuery());
        }
        return query;
    }

    private QueryBuilder allExtractedFieldsExistQuery() {
@@ -77,8 +85,8 @@ private QueryBuilder allExtractedFieldsExistQuery() {
     */
    public static DataFrameDataExtractorFactory createForSourceIndices(Client client, String taskId, DataFrameAnalyticsConfig config,
                                                                       ExtractedFields extractedFields) {
        return new DataFrameDataExtractorFactory(client, taskId, Arrays.asList(config.getSource().getIndex()), extractedFields,
            config.getHeaders(), config.getAnalysis().supportsMissingValues());
        return new DataFrameDataExtractorFactory(client, taskId, Arrays.asList(config.getSource().getIndex()),
            config.getSource().getParsedQuery(), extractedFields, config.getHeaders(), config.getAnalysis().supportsMissingValues());
    }

    /**
@@ -100,8 +108,8 @@ public static void createForDestinationIndex(Client client,
            extractedFieldsDetector -> {
                ExtractedFields extractedFields = extractedFieldsDetector.detect().v1();
                DataFrameDataExtractorFactory extractorFactory = new DataFrameDataExtractorFactory(client, config.getId(),
                    Collections.singletonList(config.getDest().getIndex()), extractedFields, config.getHeaders(),
                    config.getAnalysis().supportsMissingValues());
                    Collections.singletonList(config.getDest().getIndex()), config.getSource().getParsedQuery(), extractedFields,
                    config.getHeaders(), config.getAnalysis().supportsMissingValues());
                listener.onResponse(extractorFactory);
            },
            listener::onFailure

@@ -7,7 +7,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term" : { "user" : "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term" : { "user.keyword" : "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "old_cluster_outlier_detection_job_results" }
- match: { data_frame_analytics.0.analysis: {
"outlier_detection":{
@@ -56,7 +56,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "old_cluster_regression_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_regression_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term": { "user.keyword": "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "old_cluster_regression_job_results" }
- match: { data_frame_analytics.0.analysis.regression.dependent_variable: "foo" }
- match: { data_frame_analytics.0.analysis.regression.training_percent: 100.0 }
@@ -101,7 +101,7 @@
{
"source": {
"index": "bwc_ml_outlier_detection_job_source",
"query": {"term" : { "user" : "Kimchy" }}
"query": {"term" : { "user.keyword" : "Kimchy" }}
},
"dest": {
"index": "mixed_cluster_outlier_detection_job_results"
@@ -116,7 +116,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term": { "user.keyword": "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "mixed_cluster_outlier_detection_job_results" }
- match: { data_frame_analytics.0.analysis: {
"outlier_detection":{

@@ -5,7 +5,8 @@ setup:
index: bwc_ml_outlier_detection_job_source
body: >
{
"numeric_field_1": 42.0
"numeric_field_1": 42.0,
"user": "Kimchy"
}

- do:
@@ -14,7 +15,8 @@
body: >
{
"numeric_field_1": 1.0,
"foo": 10.0
"foo": 10.0,
"user": "Kimchy"
}

- do:
@@ -31,7 +33,7 @@
{
"source": {
"index": "bwc_ml_outlier_detection_job_source",
"query": {"term" : { "user" : "Kimchy" }}
"query": {"term" : { "user.keyword" : "Kimchy" }}
},
"dest": {
"index": "old_cluster_outlier_detection_job_results"
@@ -50,7 +52,7 @@
{
"source": {
"index": "bwc_ml_regression_job_source",
"query": {"term" : { "user" : "Kimchy" }}
"query": {"term" : { "user.keyword" : "Kimchy" }}
},
"dest": {
"index": "old_cluster_regression_job_results"
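As an aside on why these term queries now target "user.keyword" rather than "user" (an inference from the setup changes above, not something the diff states): the documents are indexed without an explicit mapping, so the newly added "user": "Kimchy" field is presumably dynamically mapped as a text field with a keyword sub-field. A small sketch with standard query builders; the variable names are illustrative only:

    // On the analyzed "user" text field the index stores the lowercased token "kimchy",
    // so an exact term lookup for "Kimchy" finds nothing.
    QueryBuilder onTextField = QueryBuilders.termQuery("user", "Kimchy");

    // The dynamically created keyword sub-field keeps the raw value, so this matches.
    QueryBuilder onKeywordSubField = QueryBuilders.termQuery("user.keyword", "Kimchy");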

@@ -7,7 +7,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term": { "user.keyword": "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "old_cluster_outlier_detection_job_results" }
- match: { data_frame_analytics.0.analysis: {
"outlier_detection":{
@@ -36,7 +36,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "old_cluster_regression_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_regression_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term": { "user.keyword": "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "old_cluster_regression_job_results" }
- match: { data_frame_analytics.0.analysis.regression.dependent_variable: "foo" }
- match: { data_frame_analytics.0.analysis.regression.training_percent: 100.0 }
@@ -62,7 +62,7 @@
- match: { count: 1 }
- match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" }
- match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] }
- match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
- match: { data_frame_analytics.0.source.query: {"term": { "user.keyword": "Kimchy" }} }
- match: { data_frame_analytics.0.dest.index: "mixed_cluster_outlier_detection_job_results" }
- match: { data_frame_analytics.0.analysis: {
"outlier_detection":{