From 3caeebf6ed460ec8980e7074483794111956688d Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Mon, 30 Sep 2019 18:20:59 +0300 Subject: [PATCH 1/9] Done --- .../migration/migrate_8_0/security.asciidoc | 9 +++ .../settings/security-settings.asciidoc | 26 +++++++-- .../ldap/support/SessionFactorySettings.java | 11 +++- .../authc/ldap/support/SessionFactory.java | 17 +++++- .../security/authc/RealmSettingsTests.java | 2 +- .../authc/ldap/LdapSessionFactoryTests.java | 2 +- .../ldap/support/SessionFactoryTests.java | 57 ++++++++++++++++++- 7 files changed, 111 insertions(+), 13 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index f3ee4fd97ef85..e367e8d1ec4aa 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -41,3 +41,12 @@ realm directly. The `transport.profiles.*.xpack.security.type` setting has been removed since the Transport Client has been removed and therefore all client traffic now uses the HTTP transport. Transport profiles using this setting should be removed. + +[float] +[[ldap-ad-realms-tcp-read-timeout-removed]] +==== The `timeout.tcp_read` AD and LDAP realm settings have been removed + +The `timeout.tcp_read` Active Directory and LDAP realm settings have been +deprecated in favor of `timeout.response` since 7.5. They both have the exact +same effect but the new name clearly exposes that the timeout value represents +the time interval that the client waits for a response to a query. diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 9ace7928e2672..75dc9c9e23d63 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -445,9 +445,16 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: -The TCP read timeout period after establishing an LDAP connection. -An `s` at the end indicates seconds, or `ms` indicates milliseconds. -Defaults to `5s` (5 seconds ). +deprecated[7.5] The TCP read timeout period after establishing an LDAP +connection. This is equivalent and is deprecated in favor of +`timeout.response` and they cannot be used simultaneously. An `s` at the end +indicates seconds, or `ms` indicates milliseconds. Defaults to the value of +`timeout.ldap_search`. + +`timeout.response`:: +The time interval to wait for the response from the LDAP server. An `s` at the +end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of +`timeout.ldap_search`. `timeout.ldap_search`:: The LDAP Server enforced timeout period for an LDAP search. @@ -693,9 +700,16 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: -The TCP read timeout period after establishing an LDAP connection. -An `s` at the end indicates seconds, or `ms` indicates milliseconds. -Defaults to `5s` (5 seconds ). +deprecated[7.5] The TCP read timeout period after establishing an LDAP +connection. This is equivalent and is deprecated in favor of +`timeout.response` and they cannot be used simultaneously. An `s` at the end +indicates seconds, or `ms` indicates milliseconds. Defaults to the value of +`timeout.ldap_search`. + +`timeout.response`:: +The time interval to wait for the response from the AD server. 
An `s` at the +end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of +`timeout.ldap_search`. `timeout.ldap_search`:: The LDAP Server enforced timeout period for an LDAP search. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java index 378cf5bd0e2a0..7ee8fa113eac7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java @@ -25,12 +25,16 @@ public final class SessionFactorySettings { public static final Function> TIMEOUT_TCP_CONNECTION_SETTING = RealmSettings.affixSetting( "timeout.tcp_connect", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); - public static final Function> TIMEOUT_TCP_READ_SETTING = RealmSettings.affixSetting( - "timeout.tcp_read", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); - public static final Function> TIMEOUT_LDAP_SETTING = RealmSettings.affixSetting( "timeout.ldap_search", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + public static final Function> TIMEOUT_TCP_READ_SETTING = RealmSettings.affixSetting( + "timeout.tcp_read", key -> Setting.timeSetting(key, TimeValue.MINUS_ONE, Setting.Property.NodeScope, + Setting.Property.Deprecated)); + + public static final Function> TIMEOUT_RESPONSE_SETTING = RealmSettings.affixSetting( + "timeout.response", key -> Setting.timeSetting(key, TimeValue.MINUS_ONE, Setting.Property.NodeScope)); + public static final Function> HOSTNAME_VERIFICATION_SETTING = RealmSettings.affixSetting( "hostname_verification", key -> Setting.boolSetting(key, true, Setting.Property.NodeScope, Setting.Property.Filtered)); @@ -49,6 +53,7 @@ public static Set> getSettings(String realmType) { settings.add(URLS_SETTING.apply(realmType)); settings.add(TIMEOUT_TCP_CONNECTION_SETTING.apply(realmType)); settings.add(TIMEOUT_TCP_READ_SETTING.apply(realmType)); + settings.add(TIMEOUT_RESPONSE_SETTING.apply(realmType)); settings.add(TIMEOUT_LDAP_SETTING.apply(realmType)); settings.add(HOSTNAME_VERIFICATION_SETTING.apply(realmType)); settings.add(FOLLOW_REFERRALS_SETTING.apply(realmType)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java index 193254c7a3963..9eb755e703f9c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java @@ -118,7 +118,22 @@ protected static LDAPConnectionOptions connectionOptions(RealmConfig config, LDAPConnectionOptions options = new LDAPConnectionOptions(); options.setConnectTimeoutMillis(Math.toIntExact(config.getSetting(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING).millis())); options.setFollowReferrals(config.getSetting(SessionFactorySettings.FOLLOW_REFERRALS_SETTING)); - options.setResponseTimeoutMillis(config.getSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING).millis()); + final long responseTimeoutMillis; + if 
(config.hasSetting(SessionFactorySettings.TIMEOUT_RESPONSE_SETTING)) { + if (config.hasSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING)) { + throw new IllegalArgumentException("[" + RealmSettings.getFullSettingKey(config, + SessionFactorySettings.TIMEOUT_TCP_READ_SETTING) + "] and [" + RealmSettings.getFullSettingKey(config, + SessionFactorySettings.TIMEOUT_RESPONSE_SETTING) + "] may not be used at the same time"); + } + responseTimeoutMillis = config.getSetting(SessionFactorySettings.TIMEOUT_RESPONSE_SETTING).millis(); + } else { + if (config.hasSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING)) { + responseTimeoutMillis = config.getSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING).millis(); + } else { + responseTimeoutMillis = config.getSetting(SessionFactorySettings.TIMEOUT_LDAP_SETTING).millis(); + } + } + options.setResponseTimeoutMillis(responseTimeoutMillis); options.setAllowConcurrentSocketFactoryUse(true); final boolean verificationModeExists = config.hasSetting(SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java index d5e123c2313bb..e5cc563c3ad01 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -184,7 +184,7 @@ private Settings.Builder commonLdapSettings(String type, boolean configureSSL) { .put("unmapped_groups_as_roles", randomBoolean()) .put("files.role_mapping", "x-pack/" + randomAlphaOfLength(8) + ".yml") .put("timeout.tcp_connect", randomPositiveTimeValue()) - .put("timeout.tcp_read", randomPositiveTimeValue()) + .put("timeout.response", randomPositiveTimeValue()) .put("timeout.ldap_search", randomPositiveTimeValue()); if (configureSSL) { configureSsl("ssl.", builder, randomBoolean(), randomBoolean()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java index 9867cc29fd3da..1f18cc06059f7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java @@ -87,7 +87,7 @@ public void testBindWithReadTimeout() throws Exception { Settings settings = Settings.builder() .put(globalSettings) .put(buildLdapSettings(ldapUrl, userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE)) - .put(RealmSettings.getFullSettingKey(REALM_IDENTIFIER, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "1ms") + .put(RealmSettings.getFullSettingKey(REALM_IDENTIFIER, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "1ms") .put("path.home", createTempDir()) .build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java index bb93e95950e86..58ab67ad0362b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -61,13 +62,67 @@ public void testConnectionFactoryReturnsCorrectLDAPConnectionOptionsWithDefaultS assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class))); } + public void testSessionFactoryWithResponseTimeout() throws Exception { + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "response_settings"); + final Path pathHome = createTempDir(); + { + Settings settings = Settings.builder() + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "10s") + .put("path.home", pathHome) + .build(); + + final Environment environment = TestEnvironment.newEnvironment(settings); + RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); + assertThat(options.getResponseTimeoutMillis(), is(equalTo(10000L))); + } + { + Settings settings = Settings.builder() + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "7s") + .put("path.home", pathHome) + .build(); + + final Environment environment = TestEnvironment.newEnvironment(settings); + RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); + assertThat(options.getResponseTimeoutMillis(), is(equalTo(7000L))); + assertSettingDeprecationsAndWarnings(new Setting[]{SessionFactorySettings.TIMEOUT_TCP_READ_SETTING.apply("ldap") + .getConcreteSettingForNamespace("response_settings")}); + } + { + Settings settings = Settings.builder() + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "11s") + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "6s") + .put("path.home", pathHome) + .build(); + + final Environment environment = TestEnvironment.newEnvironment(settings); + RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> SessionFactory.connectionOptions(realmConfig + , new SSLService(settings, environment), logger)); + assertThat(ex.getMessage(), is("[xpack.security.authc.realms.ldap.response_settings.timeout.tcp_read] and [xpack.security" + + ".authc.realms.ldap.response_settings.timeout.response] may not be used at the same time")); + } + { + Settings settings = Settings.builder() + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_LDAP_SETTING), "750ms") + .put("path.home", pathHome) + .build(); + + final Environment environment = TestEnvironment.newEnvironment(settings); + RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, 
environment), logger); + assertThat(options.getResponseTimeoutMillis(), is(equalTo(750L))); + } + } + public void testConnectionFactoryReturnsCorrectLDAPConnectionOptions() throws Exception { final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "conn_settings"); final Path pathHome = createTempDir(); Settings settings = Settings.builder() .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING), "10ms") .put(getFullSettingKey(realmId, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), "false") - .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "20ms") + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "20ms") .put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), "false") .put("path.home", pathHome) .build(); From 15d68d4f61485db706303b49f452be91d6077962 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 1 Oct 2019 13:52:30 +0300 Subject: [PATCH 2/9] Update docs/reference/settings/security-settings.asciidoc Co-Authored-By: Ioannis Kakavas --- docs/reference/settings/security-settings.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 75dc9c9e23d63..75739e5a22907 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -701,7 +701,7 @@ Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: deprecated[7.5] The TCP read timeout period after establishing an LDAP -connection. This is equivalent and is deprecated in favor of +connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of `timeout.ldap_search`. From 5c0e05729d59c37e7bc0ea8c2fe87f62d92b697d Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 1 Oct 2019 13:53:14 +0300 Subject: [PATCH 3/9] Update docs/reference/settings/security-settings.asciidoc Co-Authored-By: Ioannis Kakavas --- docs/reference/settings/security-settings.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 75739e5a22907..5b84d8a51d141 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -446,7 +446,7 @@ Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: deprecated[7.5] The TCP read timeout period after establishing an LDAP -connection. This is equivalent and is deprecated in favor of +connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of `timeout.ldap_search`. 
From 0996a9cfd516ce6c177c01b198d044a42036c2fa Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 1 Oct 2019 14:06:16 +0300 Subject: [PATCH 4/9] refactored ldap_search explanation --- docs/reference/settings/security-settings.asciidoc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 5b84d8a51d141..66032d226f931 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -446,7 +446,7 @@ Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: deprecated[7.5] The TCP read timeout period after establishing an LDAP -connection. This is equivalent to and is deprecated in favor of +connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of `timeout.ldap_search`. @@ -457,7 +457,8 @@ end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of `timeout.ldap_search`. `timeout.ldap_search`:: -The LDAP Server enforced timeout period for an LDAP search. +The timeout period for an LDAP search. The value is specified in the request +and is enforced by the receiving LDAP Server. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). @@ -712,7 +713,8 @@ end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of `timeout.ldap_search`. `timeout.ldap_search`:: -The LDAP Server enforced timeout period for an LDAP search. +The timeout period for an LDAP search. The value is specified in the request +and is enforced by the receiving LDAP Server. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). From ac22cc87bb517d29e42478f036011bc1cec9a112 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sun, 20 Oct 2019 15:58:02 +0300 Subject: [PATCH 5/9] Tim's review! --- docs/reference/migration/migrate_8_0/security.asciidoc | 9 --------- docs/reference/settings/security-settings.asciidoc | 3 +-- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index e367e8d1ec4aa..f3ee4fd97ef85 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -41,12 +41,3 @@ realm directly. The `transport.profiles.*.xpack.security.type` setting has been removed since the Transport Client has been removed and therefore all client traffic now uses the HTTP transport. Transport profiles using this setting should be removed. - -[float] -[[ldap-ad-realms-tcp-read-timeout-removed]] -==== The `timeout.tcp_read` AD and LDAP realm settings have been removed - -The `timeout.tcp_read` Active Directory and LDAP realm settings have been -deprecated in favor of `timeout.response` since 7.5. They both have the exact -same effect but the new name clearly exposes that the timeout value represents -the time interval that the client waits for a response to a query. 
diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index e6a92f087bd7d..9e07524be90f0 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -445,8 +445,7 @@ Defaults to `5s` (5 seconds ). deprecated[7.5] The TCP read timeout period after establishing an LDAP connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end -indicates seconds, or `ms` indicates milliseconds. Defaults to the value of -`timeout.ldap_search`. +indicates seconds, or `ms` indicates milliseconds. `timeout.response`:: The time interval to wait for the response from the LDAP server. An `s` at the From 6f50c6de75af748cabe6bde08430cab7ed216b67 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 28 Jan 2020 12:46:00 +0000 Subject: [PATCH 6/9] [ML] Use CSV ingest processor in find_file_structure ingest pipeline (#51492) Changes the find_file_structure response to include a CSV ingest processor in the ingest pipeline it suggests. Previously the Kibana file upload functionality parsed CSV in the browser, but by parsing CSV in the ingest pipeline it makes the Kibana file upload functionality more easily interchangable with Filebeat such that the configurations it creates can more easily be used to import data with the same structure repeatedly in production. --- .../apis/find-file-structure.asciidoc | 127 +++++++++++++- .../DelimitedFileStructureFinder.java | 68 +++++--- .../FileStructureUtils.java | 40 ++++- .../NdJsonFileStructureFinder.java | 6 +- .../TextLogFileStructureFinder.java | 5 +- .../XmlFileStructureFinder.java | 5 +- .../DelimitedFileStructureFinderTests.java | 38 +++++ .../FileStructureUtilsTests.java | 155 +++++++++++++++++- 8 files changed, 403 insertions(+), 41 deletions(-) diff --git a/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc index 4f85e39d60aa0..0a1b2aaefef80 100644 --- a/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc @@ -145,8 +145,8 @@ to request analysis of 100000 lines to achieve some variety. value is `true`. Otherwise, the default value is `false`. `timeout`:: - (Optional, <>) Sets the maximum amount of time that the - structure analysis make take. If the analysis is still running when the + (Optional, <>) Sets the maximum amount of time that the + structure analysis make take. If the analysis is still running when the timeout expires then it will be aborted. The default value is 25 seconds. `timestamp_field`:: @@ -163,8 +163,8 @@ also specified. For structured file formats, if you specify this parameter, the field must exist within the file. -If this parameter is not specified, the structure finder makes a decision about -which field (if any) is the primary timestamp field. For structured file +If this parameter is not specified, the structure finder makes a decision about +which field (if any) is the primary timestamp field. For structured file formats, it is not compulsory to have a timestamp in the file. -- @@ -213,14 +213,14 @@ format from a built-in set. 
The following table provides the appropriate `timeformat` values for some example timestamps: |=== -| Timeformat | Presentation +| Timeformat | Presentation | yyyy-MM-dd HH:mm:ssZ | 2019-04-20 13:15:22+0000 -| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 +| EEE, d MMM yyyy HH:mm:ss Z | Sat, 20 Apr 2019 13:15:22 +0000 | dd.MM.yy HH:mm:ss.SSS | 20.04.19 13:15:22.285 |=== -See +See https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[the Java date/time format documentation] for more information about date and time format syntax. @@ -675,6 +675,30 @@ If the request does not encounter errors, you receive the following result: "ingest_pipeline" : { "description" : "Ingest pipeline created by file structure finder", "processors" : [ + { + "csv" : { + "field" : "message", + "target_fields" : [ + "VendorID", + "tpep_pickup_datetime", + "tpep_dropoff_datetime", + "passenger_count", + "trip_distance", + "RatecodeID", + "store_and_fwd_flag", + "PULocationID", + "DOLocationID", + "payment_type", + "fare_amount", + "extra", + "mta_tax", + "tip_amount", + "tolls_amount", + "improvement_surcharge", + "total_amount" + ] + } + }, { "date" : { "field" : "tpep_pickup_datetime", @@ -683,6 +707,95 @@ If the request does not encounter errors, you receive the following result: "yyyy-MM-dd HH:mm:ss" ] } + }, + { + "convert" : { + "field" : "DOLocationID", + "type" : "long" + } + }, + { + "convert" : { + "field" : "PULocationID", + "type" : "long" + } + }, + { + "convert" : { + "field" : "RatecodeID", + "type" : "long" + } + }, + { + "convert" : { + "field" : "VendorID", + "type" : "long" + } + }, + { + "convert" : { + "field" : "extra", + "type" : "double" + } + }, + { + "convert" : { + "field" : "fare_amount", + "type" : "double" + } + }, + { + "convert" : { + "field" : "improvement_surcharge", + "type" : "double" + } + }, + { + "convert" : { + "field" : "mta_tax", + "type" : "double" + } + }, + { + "convert" : { + "field" : "passenger_count", + "type" : "long" + } + }, + { + "convert" : { + "field" : "payment_type", + "type" : "long" + } + }, + { + "convert" : { + "field" : "tip_amount", + "type" : "double" + } + }, + { + "convert" : { + "field" : "tolls_amount", + "type" : "double" + } + }, + { + "convert" : { + "field" : "total_amount", + "type" : "double" + } + }, + { + "convert" : { + "field" : "trip_distance", + "type" : "double" + } + }, + { + "remove" : { + "field" : "message" + } } ] }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index b947ed2d9cffe..befdf3d229f21 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -93,7 +93,18 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List, SortedMap> mappingsAndFieldStats = + FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, sampleRecords, timeoutChecker); + + SortedMap mappings = mappingsAndFieldStats.v1(); + + List columnNamesList = Arrays.asList(columnNames); char delimiter = (char) csvPreference.getDelimiterChar(); + char quoteChar = csvPreference.getQuoteChar(); + + Map csvProcessorSettings = makeCsvProcessorSettings("message", columnNamesList, delimiter, quoteChar, + trimFields); + 
FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.DELIMITED) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) @@ -102,8 +113,19 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List optQuote + column.replace(quote, twoQuotes).replaceAll(REGEX_NEEDS_ESCAPE_PATTERN, "\\\\$1") + optQuote) + .collect(Collectors.joining(delimiterMatcher))); + } if (trimFields) { structureBuilder.setShouldTrimFields(true); @@ -135,32 +157,20 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List optQuote + column.replace(quote, twoQuotes).replaceAll(REGEX_NEEDS_ESCAPE_PATTERN, "\\\\$1") + optQuote) - .collect(Collectors.joining(delimiterMatcher))); - } - boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timeField.v1(), - timeField.v2().getJavaTimestampFormats(), needClientTimeZone)) + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), csvProcessorSettings, + mappings, timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone)) .setMultilineStartPattern(timeLineRegex); + } else { + structureBuilder.setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), + csvProcessorSettings, mappings, null, null, false)); } - Tuple, SortedMap> mappingsAndFieldStats = - FileStructureUtils.guessMappingsAndCalculateFieldStats(explanation, sampleRecords, timeoutChecker); - - SortedMap mappings = mappingsAndFieldStats.v1(); if (timeField != null) { mappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, FileStructureUtils.DATE_MAPPING_WITHOUT_FORMAT); } @@ -579,4 +589,24 @@ static boolean canCreateFromSample(List explanation, String sample, int private static boolean notUnexpectedEndOfFile(SuperCsvException e) { return e.getMessage().startsWith("unexpected end of file while reading quoted column") == false; } + + static Map makeCsvProcessorSettings(String field, List targetFields, char separator, char quote, boolean trim) { + + Map csvProcessorSettings = new LinkedHashMap<>(); + csvProcessorSettings.put("field", field); + csvProcessorSettings.put("target_fields", Collections.unmodifiableList(targetFields)); + if (separator != ',') { + // The value must be String, not Character, as XContent only works with String + csvProcessorSettings.put("separator", String.valueOf(separator)); + } + if (quote != '"') { + // The value must be String, not Character, as XContent only works with String + csvProcessorSettings.put("quote", String.valueOf(quote)); + } + csvProcessorSettings.put("ignore_missing", false); + if (trim) { + csvProcessorSettings.put("trim", true); + } + return Collections.unmodifiableMap(csvProcessorSettings); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index e4945d3709860..14df58a35ce8f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.grok.Grok; import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; @@ -31,6 +32,8 @@ public final class FileStructureUtils { public static final String MAPPING_PROPERTIES_SETTING = "properties"; public static final Map DATE_MAPPING_WITHOUT_FORMAT = Collections.singletonMap(MAPPING_TYPE_SETTING, "date"); + public static final Set CONVERTIBLE_TYPES = + Collections.unmodifiableSet(Sets.newHashSet("integer", "long", "float", "double", "boolean")); private static final int NUM_TOP_HITS = 10; // NUMBER Grok pattern doesn't support scientific notation, so we extend it @@ -352,6 +355,9 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * @param grokPattern The Grok pattern used for parsing semi-structured text formats. null for * fully structured formats. * @param customGrokPatternDefinitions The definitions for any custom patterns that {@code grokPattern} uses. + * @param csvProcessorSettings The CSV processor settings for delimited formats. null for + * non-delimited formats. + * @param mappingsForConversions Mappings (or partial mappings) that will be considered for field type conversions. * @param timestampField The input field containing the timestamp to be parsed into @timestamp. * null if there is no timestamp. * @param timestampFormats Timestamp formats to be used for parsing {@code timestampField}. @@ -360,10 +366,12 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * @return The ingest pipeline definition, or null if none is required. 
*/ public static Map makeIngestPipelineDefinition(String grokPattern, Map customGrokPatternDefinitions, + Map csvProcessorSettings, + Map mappingsForConversions, String timestampField, List timestampFormats, boolean needClientTimezone) { - if (grokPattern == null && timestampField == null) { + if (grokPattern == null && csvProcessorSettings == null && timestampField == null) { return null; } @@ -384,6 +392,10 @@ public static Map makeIngestPipelineDefinition(String grokPatter assert customGrokPatternDefinitions.isEmpty(); } + if (csvProcessorSettings != null) { + processors.add(Collections.singletonMap("csv", csvProcessorSettings)); + } + if (timestampField != null) { Map dateProcessorSettings = new LinkedHashMap<>(); dateProcessorSettings.put("field", timestampField); @@ -394,6 +406,32 @@ public static Map makeIngestPipelineDefinition(String grokPatter processors.add(Collections.singletonMap("date", dateProcessorSettings)); } + for (Map.Entry mapping : mappingsForConversions.entrySet()) { + String fieldName = mapping.getKey(); + Object values = mapping.getValue(); + if (values instanceof Map) { + Object type = ((Map) values).get(MAPPING_TYPE_SETTING); + if (CONVERTIBLE_TYPES.contains(type)) { + Map convertProcessorSettings = new LinkedHashMap<>(); + convertProcessorSettings.put("field", fieldName); + convertProcessorSettings.put("type", type); + convertProcessorSettings.put("ignore_missing", true); + processors.add(Collections.singletonMap("convert", convertProcessorSettings)); + } + } + } + + // This removes the unparsed message field for delimited formats (unless the same field name is used for one of the columns) + if (csvProcessorSettings != null) { + Object field = csvProcessorSettings.get("field"); + assert field != null; + Object targetFields = csvProcessorSettings.get("target_fields"); + assert targetFields instanceof List; + if (((List) targetFields).contains(field) == false) { + processors.add(Collections.singletonMap("remove", Collections.singletonMap("field", field))); + } + } + // This removes the interim timestamp field used for semi-structured text formats if (grokPattern != null && timestampField != null) { processors.add(Collections.singletonMap("remove", Collections.singletonMap("field", timestampField))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java index 1b405eb685fa2..da103630be586 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java @@ -61,8 +61,10 @@ static NdJsonFileStructureFinder makeNdJsonFileStructureFinder(List expl .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timeField.v1(), - timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, + // Note: no convert processors are added based on mappings for NDJSON input + // because it's reasonable that _source matches the supplied JSON precisely + Collections.emptyMap(), timeField.v1(), 
timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index e47d045dd257e..e5e9576b316aa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -150,9 +150,8 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex .setJavaTimestampFormats(timestampFormatFinder.getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) .setGrokPattern(grokPattern) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, - customGrokPatternDefinitions, interimTimestampField, - timestampFormatFinder.getJavaTimestampFormats(), needClientTimeZone)) + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, customGrokPatternDefinitions, null, mappings, + interimTimestampField, timestampFormatFinder.getJavaTimestampFormats(), needClientTimeZone)) .setMappings(mappings) .setFieldStats(fieldStats) .setExplanation(explanation) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java index 91fc61bcbd4b4..94e698d269c5c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java @@ -102,8 +102,9 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), - topLevelTag + "." + timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, + Collections.emptyMap(), topLevelTag + "." 
+ timeField.v1(), timeField.v2().getJavaTimestampFormats(), + needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 993343084a848..fbe94c92e8e56 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -15,11 +15,14 @@ import java.util.BitSet; import java.util.Collections; import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.ml.filestructurefinder.DelimitedFileStructureFinder.levenshteinFieldwiseCompareRows; import static org.elasticsearch.xpack.ml.filestructurefinder.DelimitedFileStructureFinder.levenshteinDistance; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; public class DelimitedFileStructureFinderTests extends FileStructureTestCase { @@ -583,4 +586,39 @@ public void testRowContainsDuplicateNonEmptyValues() { assertNull(DelimitedFileStructureFinder.findDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); assertNull(DelimitedFileStructureFinder.findDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); } + + public void testMakeCsvProcessorSettings() { + + String field = randomAlphaOfLength(10); + List targetFields = Arrays.asList(generateRandomStringArray(10, field.length() - 1, false , false)); + char separator = randomFrom(',', ';', '\t', '|'); + char quote = randomFrom('"', '\''); + boolean trim = randomBoolean(); + Map settings = DelimitedFileStructureFinder.makeCsvProcessorSettings(field, targetFields, separator, quote, trim); + assertThat(settings.get("field"), equalTo(field)); + assertThat(settings.get("target_fields"), equalTo(targetFields)); + assertThat(settings.get("ignore_missing"), equalTo(false)); + if (separator == ',') { + assertThat(settings, not(hasKey("separator"))); + } else { + assertThat(settings.get("separator"), equalTo(String.valueOf(separator))); + } + if (quote == '"') { + assertThat(settings, not(hasKey("quote"))); + } else { + assertThat(settings.get("quote"), equalTo(String.valueOf(quote))); + } + if (trim) { + assertThat(settings.get("trim"), equalTo(true)); + } else { + assertThat(settings, not(hasKey("trim"))); + } + } + + static Map randomCsvProcessorSettings() { + String field = randomAlphaOfLength(10); + return DelimitedFileStructureFinder.makeCsvProcessorSettings(field, + Arrays.asList(generateRandomStringArray(10, field.length() - 1, false , false)), randomFrom(',', ';', '\t', '|'), + randomFrom('"', '\''), randomBoolean()); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index a0f54c6b6f24f..d52c3cf87ddb9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -18,6 +18,8 @@ import static 
org.elasticsearch.xpack.ml.filestructurefinder.FileStructureOverrides.EMPTY_OVERRIDES; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class FileStructureUtilsTests extends FileStructureTestCase { @@ -346,21 +348,22 @@ public void testGuessMappingsAndCalculateFieldStats() { assertNull(fieldStats.get("nothing")); } - public void testMakeIngestPipelineDefinitionGivenStructuredWithoutTimestamp() { + public void testMakeIngestPipelineDefinitionGivenNdJsonWithoutTimestamp() { - assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, null, false)); + assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, Collections.emptyMap(), null, null, + false)); } @SuppressWarnings("unchecked") - public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { + public void testMakeIngestPipelineDefinitionGivenNdJsonWithTimestamp() { String timestampField = randomAlphaOfLength(10); List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timestampField, - timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, + Collections.emptyMap(), timestampField, timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); @@ -379,6 +382,144 @@ public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { assertEquals(Collections.emptyMap(), pipeline); } + @SuppressWarnings("unchecked") + public void testMakeIngestPipelineDefinitionGivenDelimitedWithoutTimestamp() { + + Map csvProcessorSettings = DelimitedFileStructureFinderTests.randomCsvProcessorSettings(); + + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), csvProcessorSettings, + Collections.emptyMap(), null, null, false); + assertNotNull(pipeline); + + assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); + + List> processors = (List>) pipeline.remove("processors"); + assertNotNull(processors); + assertEquals(2, processors.size()); + + Map csvProcessor = (Map) processors.get(0).get("csv"); + assertNotNull(csvProcessor); + assertThat(csvProcessor.get("field"), instanceOf(String.class)); + assertThat(csvProcessor.get("target_fields"), instanceOf(List.class)); + + Map removeProcessor = (Map) processors.get(1).get("remove"); + assertNotNull(removeProcessor); + assertThat(csvProcessor.get("field"), equalTo(csvProcessorSettings.get("field"))); + + // After removing the two expected fields there should be nothing left in the pipeline + assertEquals(Collections.emptyMap(), pipeline); + } + + @SuppressWarnings("unchecked") + public void testMakeIngestPipelineDefinitionGivenDelimitedWithFieldInTargetFields() { + + Map csvProcessorSettings = new HashMap<>(DelimitedFileStructureFinderTests.randomCsvProcessorSettings()); + // Hack it so the field to be parsed is also one of the column names + String firstTargetField = ((List) csvProcessorSettings.get("target_fields")).get(0); + csvProcessorSettings.put("field", firstTargetField); + + Map pipeline = 
FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), csvProcessorSettings, + Collections.emptyMap(), null, null, false); + assertNotNull(pipeline); + + assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); + + List> processors = (List>) pipeline.remove("processors"); + assertNotNull(processors); + assertEquals(1, processors.size()); // 1 because there's no "remove" processor this time + + Map csvProcessor = (Map) processors.get(0).get("csv"); + assertNotNull(csvProcessor); + assertThat(csvProcessor.get("field"), equalTo(firstTargetField)); + assertThat(csvProcessor.get("target_fields"), instanceOf(List.class)); + assertThat(csvProcessor.get("ignore_missing"), equalTo(false)); + + // After removing the two expected fields there should be nothing left in the pipeline + assertEquals(Collections.emptyMap(), pipeline); + } + + @SuppressWarnings("unchecked") + public void testMakeIngestPipelineDefinitionGivenDelimitedWithConversion() { + + Map csvProcessorSettings = DelimitedFileStructureFinderTests.randomCsvProcessorSettings(); + boolean expectConversion = randomBoolean(); + String mappingType = expectConversion ? randomFrom("long", "double", "boolean") : randomFrom("keyword", "text", "date"); + String firstTargetField = ((List) csvProcessorSettings.get("target_fields")).get(0); + Map mappingsForConversions = + Collections.singletonMap(firstTargetField, Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, mappingType)); + + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), csvProcessorSettings, + mappingsForConversions, null, null, false); + assertNotNull(pipeline); + + assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); + + List> processors = (List>) pipeline.remove("processors"); + assertNotNull(processors); + assertEquals(expectConversion ? 
3 : 2, processors.size()); + + Map csvProcessor = (Map) processors.get(0).get("csv"); + assertNotNull(csvProcessor); + assertThat(csvProcessor.get("field"), instanceOf(String.class)); + assertThat(csvProcessor.get("target_fields"), instanceOf(List.class)); + assertThat(csvProcessor.get("ignore_missing"), equalTo(false)); + + if (expectConversion) { + Map convertProcessor = (Map) processors.get(1).get("convert"); + assertNotNull(convertProcessor); + assertThat(convertProcessor.get("field"), equalTo(firstTargetField)); + assertThat(convertProcessor.get("type"), equalTo(mappingType)); + assertThat(convertProcessor.get("ignore_missing"), equalTo(true)); + } + + Map removeProcessor = (Map) processors.get(processors.size() - 1).get("remove"); + assertNotNull(removeProcessor); + assertThat(removeProcessor.get("field"), equalTo(csvProcessorSettings.get("field"))); + + // After removing the two expected fields there should be nothing left in the pipeline + assertEquals(Collections.emptyMap(), pipeline); + } + + @SuppressWarnings("unchecked") + public void testMakeIngestPipelineDefinitionGivenDelimitedWithTimestamp() { + + Map csvProcessorSettings = DelimitedFileStructureFinderTests.randomCsvProcessorSettings(); + + String timestampField = randomAlphaOfLength(10); + List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), + Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); + boolean needClientTimezone = randomBoolean(); + + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), csvProcessorSettings, + Collections.emptyMap(), timestampField, timestampFormats, needClientTimezone); + assertNotNull(pipeline); + + assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); + + List> processors = (List>) pipeline.remove("processors"); + assertNotNull(processors); + assertEquals(3, processors.size()); + + Map csvProcessor = (Map) processors.get(0).get("csv"); + assertNotNull(csvProcessor); + assertThat(csvProcessor.get("field"), instanceOf(String.class)); + assertThat(csvProcessor.get("target_fields"), instanceOf(List.class)); + assertThat(csvProcessor.get("ignore_missing"), equalTo(false)); + + Map dateProcessor = (Map) processors.get(1).get("date"); + assertNotNull(dateProcessor); + assertEquals(timestampField, dateProcessor.get("field")); + assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); + assertEquals(timestampFormats, dateProcessor.get("formats")); + + Map removeProcessor = (Map) processors.get(2).get("remove"); + assertNotNull(removeProcessor); + assertThat(removeProcessor.get("field"), equalTo(csvProcessorSettings.get("field"))); + + // After removing the two expected fields there should be nothing left in the pipeline + assertEquals(Collections.emptyMap(), pipeline); + } + @SuppressWarnings("unchecked") public void testMakeIngestPipelineDefinitionGivenSemiStructured() { @@ -388,8 +529,8 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(grokPattern, Collections.emptyMap(), timestampField, - timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(grokPattern, Collections.emptyMap(), null, + Collections.emptyMap(), timestampField, timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest 
pipeline created by file structure finder", pipeline.remove("description")); From b299978b910828e1084bc56719aa79dd54f9a298 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 28 Jan 2020 08:08:32 -0500 Subject: [PATCH 7/9] Add test verify replica allocator with sync_id (#51512) We no longer issue new sync_ids in 8.0, but we still need to make sure that the replica allocator prefers copies with matching sync_id. This commit adds tests for that. Relates #50776 --- .../ReplicaShardAllocatorSyncIdIT.java | 248 ++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java new file mode 100644 index 0000000000000..15f4ef97e8c73 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import org.apache.lucene.index.IndexWriter; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; + +/** + * A legacy version of {@link ReplicaShardAllocatorIT#testPreferCopyCanPerformNoopRecovery()} verifying + * that the {@link ReplicaShardAllocator} prefers copies with matching sync_id. 
+ * TODO: Remove this test in 9.0
+ */
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class ReplicaShardAllocatorSyncIdIT extends ESIntegTestCase {
+
+    private static class SyncedFlushEngine extends InternalEngine {
+        private volatile IndexWriter indexWriter;
+
+        SyncedFlushEngine(EngineConfig engineConfig) {
+            super(engineConfig);
+        }
+
+        @Override
+        protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException {
+            indexWriter = writer;
+            super.commitIndexWriter(writer, translog);
+        }
+
+        void syncFlush(String syncId) throws IOException {
+            assertNotNull(indexWriter);
+            try (ReleasableLock ignored = writeLock.acquire()) {
+                assertThat(getTranslogStats().getUncommittedOperations(), equalTo(0));
+                Map<String, String> userData = new HashMap<>(getLastCommittedSegmentInfos().userData);
+                userData.put(Engine.SYNC_COMMIT_ID, syncId);
+                indexWriter.setLiveCommitData(userData.entrySet());
+                indexWriter.commit();
+            }
+        }
+    }
+
+    public static class SyncedFlushPlugin extends Plugin implements EnginePlugin {
+        @Override
+        public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) {
+            return Optional.of(SyncedFlushEngine::new);
+        }
+    }
+
+    @Override
+    protected boolean addMockInternalEngine() {
+        return false;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class, SyncedFlushPlugin.class);
+    }
+
+    private void syncFlush(String index) throws IOException {
+        String syncId = randomAlphaOfLength(10);
+        final Set<String> nodes = internalCluster().nodesInclude(index);
+        for (String node : nodes) {
+            IndexService indexService = internalCluster().getInstance(IndicesService.class, node).indexServiceSafe(resolveIndex(index));
+            for (IndexShard indexShard : indexService) {
+                SyncedFlushEngine engine = (SyncedFlushEngine) IndexShardTestCase.getEngine(indexShard);
+                engine.syncFlush(syncId);
+            }
+        }
+    }
+
+    public void testPreferCopyCanPerformNoopRecovery() throws Exception {
+        String indexName = "test";
+        String nodeWithPrimary = internalCluster().startNode();
+        assertAcked(
+            client().admin().indices().prepareCreate(indexName)
+                .setSettings(Settings.builder()
+                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                    .put(MergePolicyConfig.INDEX_MERGE_ENABLED, "false")
+                    .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "1ms") // expire PRRLs quickly
+                    .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
+                    .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
+                    .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "1ms")));
+        String nodeWithReplica = internalCluster().startDataOnlyNode();
+        Settings nodeWithReplicaSettings = internalCluster().dataPathSettings(nodeWithReplica);
+        ensureGreen(indexName);
+        indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(100, 500))
+            .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList()));
+        client().admin().indices().prepareFlush(indexName).get();
+        if (randomBoolean()) {
+            client().admin().indices().prepareForceMerge(indexName).get();
+        }
+        ensureGlobalCheckpointAdvancedAndSynced(indexName);
+        syncFlush(indexName);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica));
+        // Wait until the peer recovery retention leases of the offline node are expired
+        assertBusy(() -> {
+            for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) {
+                assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1));
+            }
+        });
+        CountDownLatch blockRecovery = new CountDownLatch(1);
+        CountDownLatch recoveryStarted = new CountDownLatch(1);
+        MockTransportService transportServiceOnPrimary
+            = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary);
+        transportServiceOnPrimary.addSendBehavior((connection, requestId, action, request, options) -> {
+            if (PeerRecoveryTargetService.Actions.FILES_INFO.equals(action)) {
+                recoveryStarted.countDown();
+                try {
+                    blockRecovery.await();
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                }
+            }
+            connection.sendRequest(requestId, action, request, options);
+        });
+        internalCluster().startDataOnlyNode();
+        recoveryStarted.await();
+        nodeWithReplica = internalCluster().startDataOnlyNode(nodeWithReplicaSettings);
+        // AllocationService only calls GatewayAllocator if there are unassigned shards
+        assertAcked(client().admin().indices().prepareCreate("dummy-index").setWaitForActiveShards(0));
+        ensureGreen(indexName);
+        assertThat(internalCluster().nodesInclude(indexName), hasItem(nodeWithReplica));
+        assertNoOpRecoveries(indexName);
+        blockRecovery.countDown();
+        transportServiceOnPrimary.clearAllRules();
+    }
+
+    public void testFullClusterRestartPerformNoopRecovery() throws Exception {
+        int numOfReplicas = randomIntBetween(1, 2);
+        internalCluster().ensureAtLeastNumDataNodes(numOfReplicas + 2);
+        String indexName = "test";
+        assertAcked(
+            client().admin().indices().prepareCreate(indexName)
+                .setSettings(Settings.builder()
+                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
+                    .put(MergePolicyConfig.INDEX_MERGE_ENABLED, "false")
+                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), randomIntBetween(10, 100) + "kb")
+                    .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "1ms") // expire PRRLs quickly
+                    .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
+                    .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")));
+        ensureGreen(indexName);
+        indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(200, 500))
+            .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList()));
+        client().admin().indices().prepareFlush(indexName).get();
+        if (randomBoolean()) {
+            client().admin().indices().prepareForceMerge(indexName).get();
+        }
+        ensureGlobalCheckpointAdvancedAndSynced(indexName);
+        syncFlush(indexName);
+        assertAcked(client().admin().cluster().prepareUpdateSettings()
+            .setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "primaries").build()));
+        internalCluster().fullRestart();
+        ensureYellow(indexName);
+        // Wait until the peer recovery retention leases of the offline node are expired
+        assertBusy(() -> {
+            for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) {
+                assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1));
+            }
+        });
+        assertAcked(client().admin().cluster().prepareUpdateSettings()
+            .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable").build()));
+        ensureGreen(indexName);
+        assertNoOpRecoveries(indexName);
+    }
+
+    private void assertNoOpRecoveries(String indexName) {
+
for (RecoveryState recovery : client().admin().indices().prepareRecoveries(indexName).get().shardRecoveryStates().get(indexName)) { + if (recovery.getPrimary() == false) { + assertThat(recovery.getIndex().fileDetails(), empty()); + assertThat(recovery.getTranslog().totalLocal(), equalTo(recovery.getTranslog().totalOperations())); + } + } + } + + private void ensureGlobalCheckpointAdvancedAndSynced(String indexName) throws Exception { + assertBusy(() -> { + Index index = resolveIndex(indexName); + for (String node : internalCluster().nodesInclude(indexName)) { + IndexService indexService = internalCluster().getInstance(IndicesService.class, node).indexService(index); + if (indexService != null) { + for (IndexShard shard : indexService) { + assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(shard.seqNoStats().getMaxSeqNo())); + } + } + } + }); + } +} From 6a869004dd3b90424f36c3822be5ac3dd9f9b387 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 28 Jan 2020 13:22:39 +0000 Subject: [PATCH 8/9] Formatting: keep simple if / else on the same line (#51526) Previous the formatter was breaking simple if/else statements (i.e. without braces) onto separate lines, which could be fragile because the formatter cannot also introduce braces. Instead, keep such expressions on the same line. --- .eclipseformat.xml | 6 +++--- .../java/org/elasticsearch/gradle/JdkDownloadPlugin.java | 3 +-- .../gradle/precommit/TestingConventionRule.java | 6 ++---- .../elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java | 3 +-- .../gradle/test/ErrorReportingTestListener.java | 6 ++---- .../gradle/testclusters/ElasticsearchCluster.java | 6 ++---- .../gradle/testclusters/ElasticsearchNode.java | 6 ++---- .../gradle/precommit/ForbiddenPatternsTaskTests.java | 3 +-- .../org/elasticsearch/gradle/test/JUnit3MethodProvider.java | 3 +-- .../java/org/elasticsearch/xpack/enrich/EnrichMetadata.java | 6 ++---- 10 files changed, 17 insertions(+), 31 deletions(-) diff --git a/.eclipseformat.xml b/.eclipseformat.xml index 9e913b41926a8..19ac64cbb4583 100644 --- a/.eclipseformat.xml +++ b/.eclipseformat.xml @@ -51,7 +51,7 @@ - + @@ -101,7 +101,7 @@ - + @@ -286,7 +286,7 @@ - + diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index b1895727949ef..c808f2a026421 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -215,8 +215,7 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S String[] pathSegments = details.getRelativePath().getSegments(); int index = 0; for (; index < pathSegments.length; index++) { - if (pathSegments[index].matches("jdk-.*")) - break; + if (pathSegments[index].matches("jdk-.*")) break; } assert index + 1 <= pathSegments.length; String[] newPathSegments = Arrays.copyOfRange(pathSegments, index + 1, pathSegments.length); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java index 32838fd243d6f..c85e74b5218e2 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java @@ -87,10 +87,8 @@ public Set getTaskNames() { @Override public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || 
getClass() != o.getClass()) - return false; + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; TestingConventionRule that = (TestingConventionRule) o; return Objects.equals(suffix, that.suffix); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java index e92221f1f869e..f34a78754cbf1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -139,8 +139,7 @@ private boolean isChildOfVisitedSymbolicLink(final FileCopyDetailsInternal detai return false; } for (final File symbolicLink : visitedSymbolicLinks) { - if (isChildOf(symbolicLink, file)) - return true; + if (isChildOf(symbolicLink, file)) return true; } return false; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java index 37811f335763c..cc133bf28edf3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java @@ -193,10 +193,8 @@ public String getFullName() { @Override public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; Descriptor that = (Descriptor) o; return Objects.equals(name, that.name) && Objects.equals(className, that.className) && Objects.equals(parent, that.parent); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index c77e10c0b82f8..0c700e51972d8 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -442,10 +442,8 @@ public NamedDomainObjectContainer getNodes() { @Override public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; ElasticsearchCluster that = (ElasticsearchCluster) o; return Objects.equals(clusterName, that.clusterName) && Objects.equals(path, that.path); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fda8d6ab9b307..86d05cd93649d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -1209,10 +1209,8 @@ void waitForAllConditions() { @Override public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; ElasticsearchNode that = (ElasticsearchNode) o; return Objects.equals(name, that.name) && Objects.equals(path, that.path); } diff --git 
a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTaskTests.java index 7d61cb46e77ea..9cea6dd9cd0f9 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTaskTests.java @@ -82,8 +82,7 @@ private void writeSourceFile(Project project, String name, String... lines) thro file.getParentFile().mkdirs(); file.createNewFile(); - if (lines.length != 0) - Files.write(file.toPath(), Arrays.asList(lines), StandardCharsets.UTF_8); + if (lines.length != 0) Files.write(file.toPath(), Arrays.asList(lines), StandardCharsets.UTF_8); } private void checkAndAssertTaskSuccessful(ForbiddenPatternsTask task) throws IOException { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java index 4b3384eb117c3..b407b0d5fdfd1 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java @@ -40,8 +40,7 @@ public Collection getTestMethods(Class suiteClass, ClassModel classMo ArrayList result = new ArrayList<>(); for (MethodModel mm : methods.values()) { // Skip any methods that have overrieds/ shadows. - if (mm.getDown() != null) - continue; + if (mm.getDown() != null) continue; Method m = mm.element; if (m.getName().startsWith("test") diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichMetadata.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichMetadata.java index 1788a8756666b..57677f57bfa5c 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichMetadata.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichMetadata.java @@ -108,10 +108,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; EnrichMetadata that = (EnrichMetadata) o; return policies.equals(that.policies); } From 1033388b922615351c7889c92e18a97851aa7422 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 28 Jan 2020 16:53:14 +0200 Subject: [PATCH 9/9] Nits --- docs/reference/settings/security-settings.asciidoc | 4 ++-- .../security/authc/ldap/support/SessionFactoryTests.java | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 8c02f69256dec..e45ae8ccbbe62 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -443,7 +443,7 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: -deprecated[7.5] The TCP read timeout period after establishing an LDAP +deprecated[7.7] The TCP read timeout period after establishing an LDAP connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end indicates seconds, or `ms` indicates milliseconds. 
@@ -698,7 +698,7 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to `5s` (5 seconds ). `timeout.tcp_read`:: -deprecated[7.5] The TCP read timeout period after establishing an LDAP +deprecated[7.7] The TCP read timeout period after establishing an LDAP connection. This is equivalent to and is deprecated in favor of `timeout.response` and they cannot be used simultaneously. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to the value of diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java index 130c28c81dbbd..db757189fd5ba 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java @@ -73,6 +73,7 @@ public void testSessionFactoryWithResponseTimeout() throws Exception { { Settings settings = Settings.builder() .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "10s") + .put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) .put("path.home", pathHome) .build(); @@ -84,6 +85,7 @@ public void testSessionFactoryWithResponseTimeout() throws Exception { { Settings settings = Settings.builder() .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "7s") + .put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) .put("path.home", pathHome) .build(); @@ -98,6 +100,7 @@ public void testSessionFactoryWithResponseTimeout() throws Exception { Settings settings = Settings.builder() .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "11s") .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "6s") + .put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) .put("path.home", pathHome) .build(); @@ -111,6 +114,7 @@ public void testSessionFactoryWithResponseTimeout() throws Exception { { Settings settings = Settings.builder() .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_LDAP_SETTING), "750ms") + .put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) .put("path.home", pathHome) .build();
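
(Not part of the patch above — an illustrative sketch.) The SessionFactoryTests hunks exercise three realm settings: the new `timeout.response`, the deprecated `timeout.tcp_read`, and the `timeout.ldap_search` fallback, and the settings docs in this series state that the first two cannot be used simultaneously. The following minimal Java sketch shows how a realm could resolve the effective response timeout under those rules; the helper name `resolveResponseTimeoutMillis`, the negative-sentinel defaults, and the exact error message are assumptions for illustration, not the patch's actual implementation.

    // Illustrative sketch only (assumed helper, not taken from this patch): resolve the
    // effective LDAP response timeout from the settings exercised in the tests above.
    // Assumes TimeValue, RealmConfig and SessionFactorySettings from the Elasticsearch
    // codebase, and that unset time settings report a negative value (TimeValue.MINUS_ONE).
    private static long resolveResponseTimeoutMillis(RealmConfig config) {
        final TimeValue response = config.getSetting(SessionFactorySettings.TIMEOUT_RESPONSE_SETTING);
        final TimeValue tcpRead = config.getSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING);
        if (response.millis() >= 0 && tcpRead.millis() >= 0) {
            // the settings docs say the deprecated and the new setting cannot be used simultaneously
            throw new IllegalArgumentException(
                "[timeout.tcp_read] and [timeout.response] may not both be configured");
        }
        if (response.millis() >= 0) {
            return response.millis(); // an explicit timeout.response wins
        }
        if (tcpRead.millis() >= 0) {
            return tcpRead.millis(); // otherwise honour the deprecated timeout.tcp_read
        }
        // neither is set: fall back to the timeout.ldap_search value
        return config.getSetting(SessionFactorySettings.TIMEOUT_LDAP_SETTING).millis();
    }

Under this sketch, the `10s`, `7s` and `750ms` cases above would resolve to 10000, 7000 and 750 milliseconds respectively, while the case configuring both settings would be rejected, in line with the mutual-exclusion rule described in the settings docs.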